--- old/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	2014-06-23 16:11:25.057358428 +0200
+++ new/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	2014-06-23 16:11:24.953354219 +0200
@@ -25,8 +25,44 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 
+#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/space.hpp"
+#include "runtime/atomic.inline.hpp"
+
+inline HeapWord* G1OffsetTableContigSpace::cas_allocate_inner(size_t size) {
+  HeapWord* obj = top();
+  do {
+    if (pointer_delta(end(), obj) >= size) {
+      HeapWord* new_top = obj + size;
+      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, &_top, obj);
+      if (result == obj) {
+        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+        return obj;
+      }
+      obj = result;
+    } else {
+      break;
+    }
+  } while (true);
+  return NULL;
+}
+
+inline HeapWord* G1OffsetTableContigSpace::allocate_inner(size_t size) {
+  HeapWord* obj = top();
+  if (pointer_delta(end(), obj) >= size) {
+    HeapWord* new_top = obj + size;
+    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+    set_top(new_top);
+    return obj;
+  }
+  return NULL;
+}
+
+
 inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
-  HeapWord* res = ContiguousSpace::allocate(size);
+  HeapWord* res = allocate_inner(size);
   if (res != NULL) {
     _offsets.alloc_block(res, size);
   }
@@ -38,12 +74,7 @@
 // this is used for larger LAB allocations only.
 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
   MutexLocker x(&_par_alloc_lock);
-  // Given that we take the lock no need to use par_allocate() here.
-  HeapWord* res = ContiguousSpace::allocate(size);
-  if (res != NULL) {
-    _offsets.alloc_block(res, size);
-  }
-  return res;
+  return allocate(size);
 }
 
 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
@@ -55,6 +86,32 @@
   return _offsets.block_start_const(p);
 }
 
+inline bool
+HeapRegion::block_is_obj(const HeapWord* p) const {
+  return p < top();
+}
+
+inline size_t
+HeapRegion::block_size(const HeapWord *addr) const {
+  const HeapWord* current_top = top();
+  if (addr < current_top) {
+    return oop(addr)->size();
+  } else {
+    assert(addr == current_top, "just checking");
+    return pointer_delta(end(), addr);
+  }
+}
+
+inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return cas_allocate_inner(word_size);
+}
+
+inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return allocate_inner(word_size);
+}
+
 inline void HeapRegion::note_start_of_marking() {
   _next_marked_bytes = 0;
   _next_top_at_mark_start = top();
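
The heart of this patch is the lock-free bump-pointer allocation in cas_allocate_inner: a thread reads the current top, computes a new top, and tries to install it with a single compare-and-swap, feeding the CAS result back into the loop on contention until the allocation succeeds or the region runs out of room. The no_bot_updates variants then route young-region allocations through these fast paths, skipping the block offset table update that allocate() performs via _offsets.alloc_block() (the asserts document that this shortcut is only legal for young regions). Below is a minimal standalone sketch of the same CAS loop, using std::atomic rather than HotSpot's Atomic::cmpxchg_ptr; the class and member names (BumpRegion, HeapWordImpl, _top, _end) are illustrative stand-ins, not HotSpot identifiers.

#include <atomic>
#include <cstddef>
#include <cstdio>

typedef unsigned long HeapWordImpl; // word-sized stand-in for HotSpot's HeapWord

class BumpRegion {
  std::atomic<HeapWordImpl*> _top;  // current allocation pointer
  HeapWordImpl* _end;               // first word past the end of the region

public:
  BumpRegion(HeapWordImpl* bottom, HeapWordImpl* end) : _top(bottom), _end(end) {}

  // Lock-free allocation of 'size' words: retry the CAS until a new top is
  // installed, or give up once the remaining space is too small.
  HeapWordImpl* cas_allocate(size_t size) {
    HeapWordImpl* obj = _top.load(std::memory_order_relaxed);
    while (static_cast<size_t>(_end - obj) >= size) {
      HeapWordImpl* new_top = obj + size;
      // On failure, compare_exchange_weak reloads the current top into 'obj',
      // mirroring how the patch reuses the cmpxchg_ptr result as the next
      // 'obj', so the space check is repeated against the fresh value.
      if (_top.compare_exchange_weak(obj, new_top)) {
        return obj;  // this thread now owns [obj, obj + size)
      }
    }
    return NULL;  // not enough room left in the region
  }
};

int main() {
  HeapWordImpl storage[1024];
  BumpRegion region(storage, storage + 1024);
  HeapWordImpl* p = region.cas_allocate(16);
  if (p != NULL) {
    std::printf("allocated 16 words at offset %td\n", p - storage);
  }
  return 0;
}

Note the same design choice as in the patch: the loser of a CAS race does not simply retry blindly, but re-checks the remaining space against the top value some other thread actually installed, which is what lets the loop terminate cleanly with NULL when the region fills up under contention.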