src/share/vm/gc_implementation/g1/heapRegion.inline.hpp

rev 6589 : 8047818: G1 HeapRegions can no longer be ContiguousSpaces
Reviewed-by:

*** 23,34 ****
   */
  
  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
  
  inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
!   HeapWord* res = ContiguousSpace::allocate(size);
    if (res != NULL) {
      _offsets.alloc_block(res, size);
    }
    return res;
  }
--- 23,70 ----
   */
  
  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
  
+ #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+ #include "gc_implementation/g1/g1CollectedHeap.hpp"
+ #include "gc_implementation/g1/heapRegion.hpp"
+ #include "memory/space.hpp"
+ #include "runtime/atomic.inline.hpp"
+ 
+ inline HeapWord* G1OffsetTableContigSpace::cas_allocate_inner(size_t size) {
+   HeapWord* obj = top();
+   do {
+     if (pointer_delta(end(), obj) >= size) {
+       HeapWord* new_top = obj + size;
+       HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, &_top, obj);
+       if (result == obj) {
+         assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+         return obj;
+       }
+       obj = result;
+     } else {
+       break;
+     }
+   } while (true);
+   return NULL;
+ }
+ 
+ inline HeapWord* G1OffsetTableContigSpace::allocate_inner(size_t size) {
+   HeapWord* obj = top();
+   if (pointer_delta(end(), obj) >= size) {
+     HeapWord* new_top = obj + size;
+     assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+     set_top(new_top);
+     return obj;
+   }
+   return NULL;
+ }
+ 
  inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
!   HeapWord* res = allocate_inner(size);
    if (res != NULL) {
      _offsets.alloc_block(res, size);
    }
    return res;
  }
*** 36,51 ****
  // Because of the requirement of keeping "_offsets" up to date with the
  // allocations, we sequentialize these with a lock. Therefore, best if
  // this is used for larger LAB allocations only.
  inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
    MutexLocker x(&_par_alloc_lock);
!   // Given that we take the lock no need to use par_allocate() here.
!   HeapWord* res = ContiguousSpace::allocate(size);
!   if (res != NULL) {
!     _offsets.alloc_block(res, size);
!   }
!   return res;
  }
  
  inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
    return _offsets.block_start(p);
  }
--- 72,82 ----
  // Because of the requirement of keeping "_offsets" up to date with the
  // allocations, we sequentialize these with a lock. Therefore, best if
  // this is used for larger LAB allocations only.
  inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
    MutexLocker x(&_par_alloc_lock);
!   return allocate(size);
  }
  
  inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
    return _offsets.block_start(p);
  }
*** 53,62 ****
--- 84,119 ----
  
  inline HeapWord*
  G1OffsetTableContigSpace::block_start_const(const void* p) const {
    return _offsets.block_start_const(p);
  }
  
+ inline bool
+ HeapRegion::block_is_obj(const HeapWord* p) const {
+   return p < top();
+ }
+ 
+ inline size_t
+ HeapRegion::block_size(const HeapWord *addr) const {
+   const HeapWord* current_top = top();
+   if (addr < current_top) {
+     return oop(addr)->size();
+   } else {
+     assert(addr == current_top, "just checking");
+     return pointer_delta(end(), addr);
+   }
+ }
+ 
+ inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
+   assert(is_young(), "we can only skip BOT updates on young regions");
+   return cas_allocate_inner(word_size);
+ }
+ 
+ inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
+   assert(is_young(), "we can only skip BOT updates on young regions");
+   return allocate_inner(word_size);
+ }
+ 
  inline void HeapRegion::note_start_of_marking() {
    _next_marked_bytes = 0;
    _next_top_at_mark_start = top();
  }
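
For reference, below is a minimal standalone sketch of the lock-free bump-pointer allocation that cas_allocate_inner() and allocate_inner() express in the patch. It is not part of the webrev: the SimpleSpace type, its _top/_end fields, and the use of std::atomic are illustrative assumptions; the real code operates on HeapWord* and uses Atomic::cmpxchg_ptr with HotSpot's own assertions and alignment checks.

// Sketch only: simplified stand-in for a G1OffsetTableContigSpace-like space.
#include <atomic>
#include <cstddef>

struct SimpleSpace {
  std::atomic<char*> _top;   // current allocation pointer (hypothetical field)
  char*              _end;   // exclusive end of the space (hypothetical field)

  // Parallel (lock-free) path, analogous to cas_allocate_inner():
  // retry the CAS until a new top is installed or the space is exhausted.
  char* cas_allocate(size_t bytes) {
    char* obj = _top.load();
    while (static_cast<size_t>(_end - obj) >= bytes) {
      char* new_top = obj + bytes;
      // On failure, compare_exchange_weak refreshes 'obj' with the current
      // top, so the loop simply retries with the new value.
      if (_top.compare_exchange_weak(obj, new_top)) {
        return obj;
      }
    }
    return nullptr;  // not enough space left
  }

  // Serial path, analogous to allocate_inner(): no atomics needed when the
  // caller holds a lock (par_allocate) or runs single-threaded.
  char* allocate(size_t bytes) {
    char* obj = _top.load();
    if (static_cast<size_t>(_end - obj) >= bytes) {
      _top.store(obj + bytes);
      return obj;
    }
    return nullptr;
  }
};

The split mirrors the patch's design: the no-BOT-update entry points on young regions pick either the CAS-based or the plain bump path, while allocate()/par_allocate() additionally record the block in _offsets, with par_allocate() serialized under _par_alloc_lock so the offset table stays consistent.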