
src/share/vm/gc/g1/heapRegion.inline.hpp


old/src/share/vm/gc/g1/heapRegion.inline.hpp:

  98 // this is used for larger LAB allocations only.
  99 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t min_word_size,
 100                                                         size_t desired_word_size,
 101                                                         size_t* actual_size) {
 102   MutexLocker x(&_par_alloc_lock);
 103   return allocate(min_word_size, desired_word_size, actual_size);
 104 }
 105 
 106 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
 107   return _offsets.block_start(p);
 108 }
 109 
 110 inline HeapWord*
 111 G1OffsetTableContigSpace::block_start_const(const void* p) const {
 112   return _offsets.block_start_const(p);
 113 }
 114 
 115 inline bool
 116 HeapRegion::block_is_obj(const HeapWord* p) const {
 117   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 118   if (ClassUnloadingWithConcurrentMark) {
 119     return !g1h->is_obj_dead(oop(p), this);
 120   }
 121   return p < top();
 122 }
 123 
 124 inline size_t
 125 HeapRegion::block_size(const HeapWord *addr) const {
 126   if (addr == top()) {
 127     return pointer_delta(end(), addr);
 128   }
 129 
 130   if (block_is_obj(addr)) {
 131     return oop(addr)->size();
 132   }
 133 
 134   assert(ClassUnloadingWithConcurrentMark,
 135          "All blocks should be objects if G1 Class Unloading isn't used. "
 136          "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
 137          "addr: " PTR_FORMAT,


 159   size_t temp;
 160   return allocate_no_bot_updates(word_size, word_size, &temp);
 161 }
 162 
 163 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
 164                                                      size_t desired_word_size,
 165                                                      size_t* actual_word_size) {
 166   assert(is_young(), "we can only skip BOT updates on young regions");
 167   return allocate_impl(min_word_size, desired_word_size, actual_word_size);
 168 }
 169 
 170 inline void HeapRegion::note_start_of_marking() {
 171   _next_marked_bytes = 0;
 172   _next_top_at_mark_start = top();
 173 }
 174 
 175 inline void HeapRegion::note_end_of_marking() {
 176   _prev_top_at_mark_start = _next_top_at_mark_start;
 177   _prev_marked_bytes = _next_marked_bytes;
 178   _next_marked_bytes = 0;
 179 
 180   assert(_prev_marked_bytes <=
 181          (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
 182          HeapWordSize, "invariant");
 183 }
 184 
 185 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
 186   if (is_survivor()) {
 187     // This is how we always allocate survivors.
 188     assert(_next_top_at_mark_start == bottom(), "invariant");
 189   } else {
 190     if (during_initial_mark) {
 191       // During initial-mark we'll explicitly mark any objects on old
 192       // regions that are pointed to by roots. Given that explicit
 193       // marks only make sense under NTAMS it'd be nice if we could
 194       // check that condition if we wanted to. Given that we don't
 195       // know where the top of this region will end up, we simply set
 196       // NTAMS to the end of the region so all marks will be below
 197       // NTAMS. We'll set it to the actual top when we retire this region.
 198       _next_top_at_mark_start = end();
 199     } else {
 200       // We could have re-used this old region as to-space over a
 201       // couple of GCs since the start of the concurrent marking
 202       // cycle. This means that [bottom,NTAMS) will contain objects


new/src/share/vm/gc/g1/heapRegion.inline.hpp:

  98 // this is used for larger LAB allocations only.
  99 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t min_word_size,
 100                                                         size_t desired_word_size,
 101                                                         size_t* actual_size) {
 102   MutexLocker x(&_par_alloc_lock);
 103   return allocate(min_word_size, desired_word_size, actual_size);
 104 }
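
As the comment says, this entry point serves the larger LAB allocations: the caller asks for desired_word_size words, is willing to accept anything down to min_word_size (for instance when only the tail of the space is left), and learns through actual_size how much it really got; the parallel variant just takes _par_alloc_lock around the same path. A minimal sketch of that contract, with an invented ToySpace standing in for the real G1OffsetTableContigSpace, might look like this:

#include <algorithm>
#include <cstddef>
#include <mutex>

// Deliberately simplified bump-pointer space; only the min/desired/actual
// allocation contract is modelled, nothing else from the real class.
class ToySpace {
  static constexpr size_t kCapacityWords = 1024;
  size_t  _words[kCapacityWords];
  size_t* _top = _words;
  size_t* _end = _words + kCapacityWords;
  std::mutex _par_alloc_lock;

public:
  // Hand out desired_word_size words if they fit, otherwise anything down to
  // min_word_size; *actual_size reports what was actually granted.
  size_t* allocate(size_t min_word_size, size_t desired_word_size,
                   size_t* actual_size) {
    size_t available = static_cast<size_t>(_end - _top);
    if (available < min_word_size) {
      return nullptr;                          // not even the minimum fits
    }
    size_t taken = std::min(desired_word_size, available);
    size_t* result = _top;
    _top += taken;
    *actual_size = taken;
    return result;
  }

  // Parallel variant: identical contract, serialized by a lock, mirroring the
  // MutexLocker on _par_alloc_lock above.
  size_t* par_allocate(size_t min_word_size, size_t desired_word_size,
                       size_t* actual_size) {
    std::lock_guard<std::mutex> x(_par_alloc_lock);
    return allocate(min_word_size, desired_word_size, actual_size);
  }
};
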
 105 
 106 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
 107   return _offsets.block_start(p);
 108 }
 109 
 110 inline HeapWord*
 111 G1OffsetTableContigSpace::block_start_const(const void* p) const {
 112   return _offsets.block_start_const(p);
 113 }
 114 
 115 inline bool
 116 HeapRegion::block_is_obj(const HeapWord* p) const {
 117   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 118 
 119   if (!this->is_in(p)) {
 120     HeapRegion* hr = g1h->heap_region_containing(p);
 121 #ifdef ASSERT
 122     assert(hr->is_humongous(), "This case can only happen for humongous regions");
 123     oop obj = oop(hr->humongous_start_region()->bottom());
 124     assert((HeapWord*)obj <= p, "p must be in humongous object");
 125     assert(p <= (HeapWord*)obj + obj->size(), "p must be in humongous object");
 126 #endif
 127     return hr->block_is_obj(p);
 128   }
 129   if (ClassUnloadingWithConcurrentMark) {
 130     return !g1h->is_obj_dead(oop(p), this);
 131   }
 132   return p < top();
 133 }
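
The new early return above covers the one situation where block_is_obj can be asked about an address outside its own region: a humongous object that starts in one region and spills into the following ones, which is what the checks under #ifdef ASSERT document. The query is simply delegated to the region that actually contains p. A stripped-down sketch of that delegation, with invented ToyHeap/ToyRegion types in place of G1CollectedHeap and HeapRegion and modelling only the plain p < top() branch, might look like this:

#include <cstddef>
#include <vector>

struct ToyHeap;

struct ToyRegion {
  ToyHeap*      heap;
  const size_t* bottom;
  const size_t* top;
  const size_t* end;

  bool is_in(const size_t* p) const { return p >= bottom && p < end; }
  bool block_is_obj(const size_t* p) const;   // defined after ToyHeap
};

struct ToyHeap {
  std::vector<ToyRegion> regions;   // one entry per fixed-size region
  const size_t* base;               // start of the covered address range
  size_t words_per_region;

  // Plays the role of g1h->heap_region_containing(p): a plain index
  // computation into the region table.
  ToyRegion* region_containing(const size_t* p) {
    size_t index = static_cast<size_t>(p - base) / words_per_region;
    return &regions[index];
  }
};

inline bool ToyRegion::block_is_obj(const size_t* p) const {
  if (!is_in(p)) {
    // Mirrors the new path above: ask the region that really contains p.
    // Only a humongous object spanning several regions can trigger this.
    return heap->region_containing(p)->block_is_obj(p);
  }
  // The pre-existing simple case (no class unloading): every block below
  // top is an object.
  return p < top;
}
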
 134 
 135 inline size_t
 136 HeapRegion::block_size(const HeapWord *addr) const {
 137   if (addr == top()) {
 138     return pointer_delta(end(), addr);
 139   }
 140 
 141   if (block_is_obj(addr)) {
 142     return oop(addr)->size();
 143   }
 144 
 145   assert(ClassUnloadingWithConcurrentMark,
 146          "All blocks should be objects if G1 Class Unloading isn't used. "
 147          "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
 148          "addr: " PTR_FORMAT,


 170   size_t temp;
 171   return allocate_no_bot_updates(word_size, word_size, &temp);
 172 }
 173 
 174 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
 175                                                      size_t desired_word_size,
 176                                                      size_t* actual_word_size) {
 177   assert(is_young(), "we can only skip BOT updates on young regions");
 178   return allocate_impl(min_word_size, desired_word_size, actual_word_size);
 179 }
 180 
 181 inline void HeapRegion::note_start_of_marking() {
 182   _next_marked_bytes = 0;
 183   _next_top_at_mark_start = top();
 184 }
 185 
 186 inline void HeapRegion::note_end_of_marking() {
 187   _prev_top_at_mark_start = _next_top_at_mark_start;
 188   _prev_marked_bytes = _next_marked_bytes;
 189   _next_marked_bytes = 0;
 190 }
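
note_start_of_marking and note_end_of_marking keep the usual two-level view of marking data: the "next" fields belong to the cycle in progress, the "prev" fields to the last completed cycle, and finishing a cycle simply promotes next to prev. The following self-contained walkthrough, with an invented ToyMarkingRegion in place of HeapRegion and byte pointers in place of HeapWord* (so no HeapWordSize scaling is needed), drives one cycle through the two calls and checks the invariant asserted in the first listing:

#include <cassert>
#include <cstddef>

// Illustrative only; not a HotSpot type.
struct ToyMarkingRegion {
  const char* _bottom;
  const char* _top;
  const char* _prev_top_at_mark_start;
  const char* _next_top_at_mark_start;
  size_t _prev_marked_bytes;
  size_t _next_marked_bytes;

  void note_start_of_marking() {
    // Snapshot top: objects allocated above it during the cycle are
    // considered implicitly live and are never explicitly marked.
    _next_marked_bytes = 0;
    _next_top_at_mark_start = _top;
  }

  void note_end_of_marking() {
    // Promote the finished cycle's results to the stable "prev" view.
    _prev_top_at_mark_start = _next_top_at_mark_start;
    _prev_marked_bytes = _next_marked_bytes;
    _next_marked_bytes = 0;
  }
};

int main() {
  char space[1024];
  ToyMarkingRegion r = {space, space + 512, nullptr, nullptr, 0, 0};

  r.note_start_of_marking();       // NTAMS := top, counter reset
  r._next_marked_bytes = 200;      // marking finds 200 bytes live below NTAMS
  r.note_end_of_marking();         // prev := next, next reset

  // Liveness can never exceed the space between bottom and the snapshot that
  // produced it (the assert in the first listing, without the word scaling).
  assert(r._prev_marked_bytes <=
         static_cast<size_t>(r._prev_top_at_mark_start - r._bottom));
  return 0;
}
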
 191 
 192 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
 193   if (is_survivor()) {
 194     // This is how we always allocate survivors.
 195     assert(_next_top_at_mark_start == bottom(), "invariant");
 196   } else {
 197     if (during_initial_mark) {
 198       // During initial-mark we'll explicitly mark any objects on old
 199       // regions that are pointed to by roots. Given that explicit
 200       // marks only make sense under NTAMS it'd be nice if we could
 201       // check that condition if we wanted to. Given that we don't
 202       // know where the top of this region will end up, we simply set
 203       // NTAMS to the end of the region so all marks will be below
 204       // NTAMS. We'll set it to the actual top when we retire this region.
 205       _next_top_at_mark_start = end();
 206     } else {
 207       // We could have re-used this old region as to-space over a
 208       // couple of GCs since the start of the concurrent marking
 209       // cycle. This means that [bottom,NTAMS) will contain objects


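The comment in note_start_of_copying carries most of the reasoning in this last hunk: explicit marks are only meaningful for addresses below NTAMS, and since the region's final top is unknown while it is still being filled during initial-mark, NTAMS is parked at end() and pulled back to the actual top when the region is retired. A short sketch of that idea, using a hypothetical ToyCopyRegion with a made-up retire() helper rather than the real retirement path, might look like this:

#include <cassert>
#include <cstddef>

// Hypothetical model: "mark is valid" is simply "address below NTAMS".
// Neither the type nor retire() is a HotSpot API.
struct ToyCopyRegion {
  const char* _bottom;
  const char* _end;
  const char* _top;     // grows as objects are evacuated into the region
  const char* _ntams;

  bool mark_is_valid(const char* addr) const { return addr < _ntams; }

  void note_start_of_copying_during_initial_mark() {
    // The final top is unknown until retirement, so cover the whole region:
    // anything copied in here can be explicitly marked.
    _ntams = _end;
  }

  void retire(const char* final_top) {
    _top = final_top;
    _ntams = final_top;   // tighten NTAMS to the now-known top
  }
};

int main() {
  char space[256];
  ToyCopyRegion r = {space, space + 256, space, space};

  r.note_start_of_copying_during_initial_mark();
  const char* evacuated = space + 100;   // an object copied in by a GC worker
  assert(r.mark_is_valid(evacuated));    // below NTAMS == end(), so markable

  r.retire(space + 128);
  assert(r.mark_is_valid(evacuated));    // still below the tightened NTAMS
  return 0;
}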