 // Because of the requirement of keeping "_offsets" up to date with the
 // allocations, we sequentialize these with a lock. Therefore, best if
 // this is used for larger LAB allocations only.
 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
   MutexLocker x(&_par_alloc_lock);
   return allocate(size);
 }

 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
   return _offsets.block_start(p);
 }

 inline HeapWord*
 G1OffsetTableContigSpace::block_start_const(const void* p) const {
   return _offsets.block_start_const(p);
 }

 inline bool
 HeapRegion::block_is_obj(const HeapWord* p) const {
-  return p < top();
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  return !g1h->is_obj_dead(oop(p), this);
 }

 inline size_t
 HeapRegion::block_size(const HeapWord *addr) const {
-  const HeapWord* current_top = top();
-  if (addr < current_top) {
-    return oop(addr)->size();
-  } else {
-    assert(addr == current_top, "just checking");
-    return pointer_delta(end(), addr);
-  }
+  // Old regions' dead objects may have dead classes.
+  // We need to find the next live object in some other
+  // manner than getting the oop size.
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  if (g1h->is_obj_dead(oop(addr), this)) {
+    HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
+                     getNextMarkedWordAddress(addr, prev_top_at_mark_start());
+
+    assert(next > addr, "must get the next live object");
+
+    return pointer_delta(next, addr);
+  } else if (addr == top()) {
+    return pointer_delta(end(), addr);
+  }
+  return oop(addr)->size();
 }
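To see why block_size() must treat dead blocks specially, consider how a region walker consumes this interface. The sketch below is a hypothetical helper, not part of this change; it assumes only the HeapRegion accessors visible above. It steps through a region block by block and relies on block_size() returning the object size for a live block, but the distance to the next live (prev-bitmap-marked) object for a dead one. Without the new code, calling oop(addr)->size() on a dead object could dereference a class that concurrent class unloading has already freed.

// Hypothetical walker, for illustration only: counts live bytes in a
// region using just the block interface above.
size_t live_bytes_in(const HeapRegion* r) {
  size_t live_words = 0;
  const HeapWord* cur = r->bottom();
  while (cur < r->top()) {
    size_t sz = r->block_size(cur);  // live block: object size;
                                     // dead block: words to next live object
    if (r->block_is_obj(cur)) {
      live_words += sz;              // only live blocks contribute
    }
    cur += sz;
  }
  return live_words * HeapWordSize;  // convert words to bytes
}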
 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
   assert(is_young(), "we can only skip BOT updates on young regions");
   return par_allocate_impl(word_size, end());
 }

 inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
   assert(is_young(), "we can only skip BOT updates on young regions");
   return allocate_impl(word_size, end());
 }

 inline void HeapRegion::note_start_of_marking() {
   _next_marked_bytes = 0;
   _next_top_at_mark_start = top();
 }

 inline void HeapRegion::note_end_of_marking() {
   _prev_top_at_mark_start = _next_top_at_mark_start;
   _prev_marked_bytes = _next_marked_bytes;
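The liveness predicate that block_is_obj() and block_size() lean on is defined against the "previous" marking snapshot whose bookkeeping note_start_of_marking() and note_end_of_marking() perform: note_end_of_marking() promotes the just-finished marking's top-at-mark-start (TAMS) and marked-byte count to the "prev" slots that is_obj_dead() consults. Below is a simplified sketch of that predicate, assuming the JDK 8-era G1 types (CMBitMapRO and the accessors above); the real check is spread across G1CollectedHeap and HeapRegion.

// Simplified model of is_obj_dead(obj, region), for illustration only.
// Objects at or above prev-top-at-mark-start (prevTAMS) were allocated
// after the last completed marking and are implicitly live; objects below
// prevTAMS are live only if the previous marking's bitmap marked them.
bool is_obj_dead_sketch(const HeapRegion* hr,
                        const CMBitMapRO* prev_bitmap,
                        HeapWord* obj_addr) {
  if (obj_addr >= hr->prev_top_at_mark_start()) {
    return false;                           // allocated since prev marking
  }
  return !prev_bitmap->isMarked(obj_addr);  // bitmap decides below prevTAMS
}

This is also why block_size() can limit getNextMarkedWordAddress() to prev_top_at_mark_start(): at or beyond prevTAMS every block is considered live, so the bitmap walk never needs to go further.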