// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}

inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
      err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
              "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") "
              "addr: " PTR_FORMAT,
              p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // Old regions' dead objects may have dead classes.
  // We need to find the next live object in some other
  // manner than getting the oop size.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(word_size, end());
}
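// Illustrative sketch (an assumption, not part of the original header): the
// helpers above are typically combined to walk a region's used space block by
// block. A live block is an object whose size comes from the oop itself; a
// dead block is skipped in one step because block_size() consults the prev
// mark bitmap to find the next live object. The function name below is
// hypothetical and only shows the intended usage pattern.
inline void walk_region_blocks_sketch(HeapRegion* hr, ObjectClosure* cl) {
  HeapWord* cur = hr->bottom();
  while (cur < hr->top()) {
    if (hr->block_is_obj(cur)) {
      cl->do_object(oop(cur));      // live object
    }
    cur += hr->block_size(cur);     // oop size, or distance to the next live object
  }
}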
" 115 "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") " 116 "addr: " PTR_FORMAT, 117 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr))); 118 119 // Old regions' dead objects may have dead classes 120 // We need to find the next live object in some other 121 // manner than getting the oop size 122 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 123 HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()-> 124 getNextMarkedWordAddress(addr, prev_top_at_mark_start()); 125 126 assert(next > addr, "must get the next live object"); 127 return pointer_delta(next, addr); 128 } 129 130 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) { 131 assert(is_young(), "we can only skip BOT updates on young regions"); 132 return par_allocate_impl(word_size, end()); 133 } |