src/share/vm/gc/g1/heapRegion.inline.hpp

  98 // this is used for larger LAB allocations only.
  99 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t min_word_size,
 100                                                         size_t desired_word_size,
 101                                                         size_t* actual_size) {
 102   MutexLocker x(&_par_alloc_lock);
 103   return allocate(min_word_size, desired_word_size, actual_size);
 104 }
 105 
 106 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
 107   return _offsets.block_start(p);
 108 }
 109 
 110 inline HeapWord*
 111 G1OffsetTableContigSpace::block_start_const(const void* p) const {
 112   return _offsets.block_start_const(p);
 113 }
 114 
 115 inline bool
 116 HeapRegion::block_is_obj(const HeapWord* p) const {
 117   G1CollectedHeap* g1h = G1CollectedHeap::heap();





 118   if (ClassUnloadingWithConcurrentMark) {
 119     return !g1h->is_obj_dead(oop(p), this);
 120   }
 121   return p < top();
 122 }
 123 
 124 inline size_t
 125 HeapRegion::block_size(const HeapWord *addr) const {
 126   if (addr == top()) {
 127     return pointer_delta(end(), addr);
 128   }
 129 
 130   if (block_is_obj(addr)) {
 131     return oop(addr)->size();
 132   }
 133 
 134   assert(ClassUnloadingWithConcurrentMark,
 135          "All blocks should be objects if G1 Class Unloading isn't used. "
 136          "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
 137          "addr: " PTR_FORMAT,


 159   size_t temp;
 160   return allocate_no_bot_updates(word_size, word_size, &temp);
 161 }
 162 
 163 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
 164                                                      size_t desired_word_size,
 165                                                      size_t* actual_word_size) {
 166   assert(is_young(), "we can only skip BOT updates on young regions");
 167   return allocate_impl(min_word_size, desired_word_size, actual_word_size);
 168 }
 169 
 170 inline void HeapRegion::note_start_of_marking() {
 171   _next_marked_bytes = 0;
 172   _next_top_at_mark_start = top();
 173 }
 174 
 175 inline void HeapRegion::note_end_of_marking() {
 176   _prev_top_at_mark_start = _next_top_at_mark_start;
 177   _prev_marked_bytes = _next_marked_bytes;
 178   _next_marked_bytes = 0;
 179 
 180   assert(_prev_marked_bytes <=
 181          (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
 182          HeapWordSize, "invariant");
 183 }
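
The assert just above spells out the bookkeeping contract between the two notifications: everything counted into _next_marked_bytes during a cycle lies below NTAMS, so once note_end_of_marking() promotes the counters, _prev_marked_bytes cannot exceed the byte size of [bottom(), PTAMS). A minimal sketch of that lifecycle, assuming the notifications are reachable from surrounding collector code (the helper name is hypothetical, not part of this patch):

// Sketch only: how the two notifications bracket one marking cycle for a
// region, and why the bound in the assert above holds.
static void example_marking_cycle(HeapRegion* hr) {
  hr->note_start_of_marking();   // NTAMS := top(), next_marked_bytes := 0
  // ... concurrent marking accumulates next_marked_bytes only for objects
  //     that lie below NTAMS ...
  hr->note_end_of_marking();     // PTAMS := old NTAMS, prev counters take over
  // Every byte counted now sits in [bottom(), PTAMS), hence
  //   _prev_marked_bytes <= pointer_delta(PTAMS, bottom()) * HeapWordSize
}
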
 184 
 185 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
 186   if (is_survivor()) {
 187     // This is how we always allocate survivors.
 188     assert(_next_top_at_mark_start == bottom(), "invariant");
 189   } else {
 190     if (during_initial_mark) {
 191       // During initial-mark we'll explicitly mark any objects on old
 192       // regions that are pointed to by roots. Given that explicit
 193       // marks only make sense under NTAMS it'd be nice if we could
 194       // check that condition if we wanted to. Given that we don't
 195       // know where the top of this region will end up, we simply set
 196       // NTAMS to the end of the region so all marks will be below
 197       // NTAMS. We'll set it to the actual top when we retire this region.
 198       _next_top_at_mark_start = end();
 199     } else {
 200       // We could have re-used this old region as to-space over a
 201       // couple of GCs since the start of the concurrent marking
 202       // cycle. This means that [bottom,NTAMS) will contain objects




  98 // this is used for larger LAB allocations only.
  99 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t min_word_size,
 100                                                         size_t desired_word_size,
 101                                                         size_t* actual_size) {
 102   MutexLocker x(&_par_alloc_lock);
 103   return allocate(min_word_size, desired_word_size, actual_size);
 104 }
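
For reference, the (min, desired, actual) contract above looks as follows from a caller's point of view; this is a sketch under the assumption that the space is reachable from the calling code, with a hypothetical helper name:

// Sketch only, not part of this patch: the caller asks for desired_words but
// is prepared to accept anything of at least min_words; on success, *actual
// reports how many words were really carved out of the space.
static HeapWord* example_flexible_par_alloc(G1OffsetTableContigSpace* space,
                                            size_t min_words,
                                            size_t desired_words) {
  size_t actual = 0;
  HeapWord* result = space->par_allocate(min_words, desired_words, &actual);
  if (result != NULL) {
    assert(min_words <= actual && actual <= desired_words,
           "returned size should fall within the requested range");
  }
  return result;
}
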
 105 
 106 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
 107   return _offsets.block_start(p);
 108 }
 109 
 110 inline HeapWord*
 111 G1OffsetTableContigSpace::block_start_const(const void* p) const {
 112   return _offsets.block_start_const(p);
 113 }
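
Both lookups above simply forward to the block offset table; the usual purpose is to map an arbitrary address inside the space (for example the first word covered by a card) back to the start of the block containing it, so that scanning can begin at a real block boundary. A minimal sketch, with a hypothetical helper name:

// Sketch only, not part of this patch: resolve an arbitrary in-space address
// to the start of its enclosing block via the BOT delegation above.
static HeapWord* example_enclosing_block_start(G1OffsetTableContigSpace* space,
                                               const void* addr_in_space) {
  HeapWord* start = space->block_start(addr_in_space);
  assert(start <= (HeapWord*)addr_in_space,
         "a block start never follows the queried address");
  return start;
}
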
 114 
 115 inline bool
 116 HeapRegion::block_is_obj(const HeapWord* p) const {
 117   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 118 
 119   if (!this->is_in(p)) {
 120     HeapRegion* hr = g1h->heap_region_containing(p);
 121     return hr->block_is_obj(p);
 122   }
 123   if (ClassUnloadingWithConcurrentMark) {
 124     return !g1h->is_obj_dead(oop(p), this);
 125   }
 126   return p < top();
 127 }
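
Taken together with block_size() below, block_is_obj() lets callers walk a region one block at a time: block_is_obj() says whether the current block may be treated as an oop (and, with the added is_in() check, forwards queries for addresses outside this region to the region that actually contains them), while block_size() advances the cursor, covering dead blocks and the remainder above top() as single units. A minimal walker sketch, with a hypothetical function name:

// Sketch only, not part of this patch: walking the blocks of a region using
// the interface above. Blocks that are not objects (possible when class
// unloading with concurrent mark is enabled) are skipped by their block size.
static void example_walk_region_blocks(HeapRegion* hr) {
  HeapWord* cur = hr->bottom();
  while (cur < hr->top()) {
    if (hr->block_is_obj(cur)) {
      // cur is a parseable object here; e.g. oop(cur)->size() is safe to call.
    }
    cur += hr->block_size(cur);
  }
}
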
 128 
 129 inline size_t
 130 HeapRegion::block_size(const HeapWord *addr) const {
 131   if (addr == top()) {
 132     return pointer_delta(end(), addr);
 133   }
 134 
 135   if (block_is_obj(addr)) {
 136     return oop(addr)->size();
 137   }
 138 
 139   assert(ClassUnloadingWithConcurrentMark,
 140          "All blocks should be objects if G1 Class Unloading isn't used. "
 141          "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
 142          "addr: " PTR_FORMAT,


 164   size_t temp;
 165   return allocate_no_bot_updates(word_size, word_size, &temp);
 166 }
 167 
 168 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
 169                                                      size_t desired_word_size,
 170                                                      size_t* actual_word_size) {
 171   assert(is_young(), "we can only skip BOT updates on young regions");
 172   return allocate_impl(min_word_size, desired_word_size, actual_word_size);
 173 }
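
The young-only precondition above pairs with the fixed-size wrapper shown just before it in this hunk, which passes word_size for both bounds and discards the reported actual size. A sketch of that adaptation from the caller's side, with a hypothetical helper name:

// Sketch only, not part of this patch: a fixed-size request is just the
// flexible interface with min == desired, so the actual size can only be
// word_size and may be discarded.
static HeapWord* example_fixed_size_no_bot_alloc(HeapRegion* hr,
                                                 size_t word_size) {
  assert(hr->is_young(), "BOT updates may only be skipped for young regions");
  size_t actual = 0;  // will equal word_size on success
  return hr->allocate_no_bot_updates(word_size, word_size, &actual);
}
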
 174 
 175 inline void HeapRegion::note_start_of_marking() {
 176   _next_marked_bytes = 0;
 177   _next_top_at_mark_start = top();
 178 }
 179 
 180 inline void HeapRegion::note_end_of_marking() {
 181   _prev_top_at_mark_start = _next_top_at_mark_start;
 182   _prev_marked_bytes = _next_marked_bytes;
 183   _next_marked_bytes = 0;




 184 }
 185 
 186 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
 187   if (is_survivor()) {
 188     // This is how we always allocate survivors.
 189     assert(_next_top_at_mark_start == bottom(), "invariant");
 190   } else {
 191     if (during_initial_mark) {
 192       // During initial-mark we'll explicitly mark any objects on old
 193       // regions that are pointed to by roots. Given that explicit
 194       // marks only make sense under NTAMS it'd be nice if we could
 195       // check that condition if we wanted to. Given that we don't
 196       // know where the top of this region will end up, we simply set
 197       // NTAMS to the end of the region so all marks will be below
 198       // NTAMS. We'll set it to the actual top when we retire this region.
 199       _next_top_at_mark_start = end();
 200     } else {
 201       // We could have re-used this old region as to-space over a
 202       // couple of GCs since the start of the concurrent marking
 203       // cycle. This means that [bottom,NTAMS) will contain objects

