--- old/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp 2014-06-23 16:11:21.585217876 +0200
+++ new/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp 2014-06-23 16:11:21.485213828 +0200
@@ -99,6 +99,21 @@
   return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
 }
 
+void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
+  check_index(index_for(right - 1), "right address out of range");
+  assert(left < right, "Heap addresses out of order");
+  size_t num_cards = pointer_delta(right, left) >> LogN_words;
+  if (UseMemSetInBOT) {
+    memset(&_offset_array[index_for(left)], offset, num_cards);
+  } else {
+    size_t i = index_for(left);
+    const size_t end = i + num_cards;
+    for (; i < end; i++) {
+      _offset_array[i] = offset;
+    }
+  }
+}
+
 //////////////////////////////////////////////////////////////////////
 // G1BlockOffsetArray
 //////////////////////////////////////////////////////////////////////
--- old/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp 2014-06-23 16:11:22.289246375 +0200
+++ new/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp 2014-06-23 16:11:22.177241841 +0200
@@ -157,6 +157,8 @@
     return _offset_array[index];
   }
 
+  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
+
   void set_offset_array(size_t index, u_char offset) {
     check_index(index, "index out of range");
     check_offset(offset, "offset too large");
@@ -170,21 +172,6 @@
     _offset_array[index] = (u_char) pointer_delta(high, low);
   }
 
-  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
-    check_index(index_for(right - 1), "right address out of range");
-    assert(left < right, "Heap addresses out of order");
-    size_t num_cards = pointer_delta(right, left) >> LogN_words;
-    if (UseMemSetInBOT) {
-      memset(&_offset_array[index_for(left)], offset, num_cards);
-    } else {
-      size_t i = index_for(left);
-      const size_t end = i + num_cards;
-      for (; i < end; i++) {
-        _offset_array[i] = offset;
-      }
-    }
-  }
-
   void set_offset_array(size_t left, size_t right, u_char offset) {
     check_index(right, "right index out of range");
     assert(left <= right, "indexes out of order");
--- old/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp 2014-06-23 16:11:22.969273902 +0200
+++ new/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp 2014-06-23 16:11:22.857269369 +0200
@@ -26,7 +26,8 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
 
 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "memory/space.hpp"
 
 inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
--- old/src/share/vm/gc_implementation/g1/heapRegion.cpp 2014-06-23 16:11:23.641301106 +0200
+++ new/src/share/vm/gc_implementation/g1/heapRegion.cpp 2014-06-23 16:11:23.533296734 +0200
@@ -30,6 +30,7 @@
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/liveRange.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/space.inline.hpp"
@@ -60,7 +61,7 @@
                                       HeapRegion* hr,
                                       HeapWord* cur, HeapWord* top) {
   oop cur_oop = oop(cur);
-  int oop_size = cur_oop->size();
+  size_t oop_size = hr->block_size(cur);
   HeapWord* next_obj = cur + oop_size;
   while (next_obj < top) {
     // Keep filtering the remembered set.
@@ -71,7 +72,7 @@
     }
     cur = next_obj;
     cur_oop = oop(cur);
-    oop_size = cur_oop->size();
+    oop_size = hr->block_size(cur);
     next_obj = cur + oop_size;
   }
   return cur;
@@ -81,7 +82,7 @@
                                               HeapWord* bottom,
                                               HeapWord* top) {
   G1CollectedHeap* g1h = _g1;
-  int oop_size;
+  size_t oop_size;
   ExtendedOopClosure* cl2 = NULL;
 
   FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
@@ -101,7 +102,7 @@
     if (!g1h->is_obj_dead(oop(bottom), _hr)) {
       oop_size = oop(bottom)->oop_iterate(cl2, mr);
     } else {
-      oop_size = oop(bottom)->size();
+      oop_size = _hr->block_size(bottom);
     }
 
     bottom += oop_size;
@@ -460,7 +461,7 @@
     } else if (!g1h->is_obj_dead(obj)) {
       cl->do_object(obj);
     }
-    cur += obj->size();
+    cur += block_size(cur);
   }
   return NULL;
 }
@@ -532,7 +533,7 @@
       return cur;
     }
     // Otherwise...
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
   }
 
   // If we finish the above loop...We have a parseable object that
@@ -540,10 +541,9 @@
   // inside or spans the entire region.
 
   assert(obj == oop(cur), "sanity");
-  assert(cur <= start &&
-         obj->klass_or_null() != NULL &&
-         (cur + obj->size()) > start,
-         "Loop postcondition");
+  assert(cur <= start, "Loop postcondition");
+  assert(obj->klass_or_null() != NULL, "Loop postcondition");
+  assert((cur + block_size(cur)) > start, "Loop postcondition");
 
   if (!g1h->is_obj_dead(obj)) {
     obj->oop_iterate(cl, mr);
@@ -557,7 +557,7 @@
     };
 
     // Otherwise:
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
 
     if (!g1h->is_obj_dead(obj)) {
       if (next < end || !obj->is_objArray()) {
@@ -912,7 +912,7 @@
   size_t object_num = 0;
   while (p < top()) {
     oop obj = oop(p);
-    size_t obj_size = obj->size();
+    size_t obj_size = block_size(p);
     object_num += 1;
 
     if (is_humongous != g1->isHumongous(obj_size)) {
@@ -1048,7 +1048,8 @@
 // away eventually.
 
 void G1OffsetTableContigSpace::clear(bool mangle_space) {
-  ContiguousSpace::clear(mangle_space);
+  set_top(bottom());
+  CompactibleSpace::clear(mangle_space);
   _offsets.zero_bottom_entry();
   _offsets.initialize_threshold();
 }
@@ -1086,7 +1087,7 @@
   if (_gc_time_stamp < g1h->get_gc_time_stamp())
     return top();
   else
-    return ContiguousSpace::saved_mark_word();
+    return Space::saved_mark_word();
 }
 
 void G1OffsetTableContigSpace::record_top_and_timestamp() {
@@ -1101,7 +1102,7 @@
     // of region. If it does so after _gc_time_stamp = ..., then it
     // will pick up the right saved_mark_word() as the high water mark
    // of the region. Either way, the behavior will be correct.
-    ContiguousSpace::set_saved_mark();
+    Space::set_saved_mark_word(top());
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;
     // No need to do another barrier to flush the writes above. If
@@ -1112,16 +1113,38 @@
   }
 }
 
+void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
+  object_iterate(blk);
+}
+
+void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
+  HeapWord* p = bottom();
+  if (!block_is_obj(p)) {
+    p += block_size(p);
+  }
+  while (p < top()) {
+    blk->do_object(oop(p));
+    p += block_size(p);
+  }
+}
+
+#define block_is_always_obj(q) true
+void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
+  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
+}
+#undef block_is_always_obj
+
 G1OffsetTableContigSpace::
 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                          MemRegion mr) :
+  _top(bottom()),
   _offsets(sharedOffsetArray, mr),
   _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
   _gc_time_stamp(0)
 {
   _offsets.set_space(this);
   // false ==> we'll do the clearing if there's clearing to be done.
-  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
+  CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
   _offsets.zero_bottom_entry();
   _offsets.initialize_threshold();
 }
--- old/src/share/vm/gc_implementation/g1/heapRegion.hpp 2014-06-23 16:11:24.381331063 +0200
+++ new/src/share/vm/gc_implementation/g1/heapRegion.hpp 2014-06-23 16:11:24.269326529 +0200
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 
-#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
 #include "gc_implementation/g1/survRateGroup.hpp"
 #include "gc_implementation/shared/ageTable.hpp"
@@ -46,8 +46,6 @@
 // The solution is to remove this method from the definition
 // of a Space.
 
-class CompactibleSpace;
-class ContiguousSpace;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
@@ -125,10 +123,12 @@
 // the regions anyway) and at the end of a Full GC. The current scheme
 // that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
-
-class G1OffsetTableContigSpace: public ContiguousSpace {
+class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
+  HeapWord* _top;
 protected:
+  inline HeapWord* cas_allocate_inner(size_t size);
+  inline HeapWord* allocate_inner(size_t size);
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
   volatile unsigned _gc_time_stamp;
@@ -144,6 +144,19 @@
   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                            MemRegion mr);
 
+  inline void set_top(HeapWord* value) { _top = value; }
+  HeapWord* top() const { return _top; }
+  void reset_after_compaction() { set_top(compaction_top()); }
+
+  size_t used() const { return byte_size(bottom(), top()); }
+  size_t free() const { return byte_size(top(), end()); }
+  bool is_free_block(const HeapWord* p) const { return p >= top(); }
+
+  MemRegion used_region() const { return MemRegion(bottom(), top()); }
+
+  void object_iterate(ObjectClosure* blk);
+  void safe_object_iterate(ObjectClosure* blk);
+
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);
 
@@ -168,6 +181,8 @@
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
 
+  void prepare_for_compaction(CompactPoint* cp);
+
   // Add offset table update.
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
@@ -353,14 +368,11 @@
     ParMarkRootClaimValue      = 9
   };
 
-  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::par_allocate(word_size);
-  }
-  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::allocate(word_size);
-  }
+  bool block_is_obj(const HeapWord* p) const;
+  size_t block_size(const HeapWord* p) const;
+
+  inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
+  inline HeapWord* allocate_no_bot_updates(size_t word_size);
 
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
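Note (illustration, not part of the patch): the behavioral core of the HeapRegion changes above is the new block_size()/block_is_obj() contract — walkers ask the region for the size of the block at an address instead of asking the oop, and the unallocated tail [top(), end()) is reported as one block so iteration terminates cleanly at the region boundary. Below is a minimal standalone C++ sketch of that walking pattern; the Region struct and the size-in-first-word encoding are assumptions made for the sketch, not HotSpot code.

// Standalone sketch of the block_size() walking pattern. Objects live
// below top(); the single non-object block [top, end) is reported as one
// block, so a walk never has to parse the unallocated tail.
#include <cassert>
#include <cstddef>
#include <iostream>

typedef size_t HeapWord;   // stand-in: each heap word holds a size_t

struct Region {            // hypothetical stand-in for G1's HeapRegion
  const HeapWord* bottom;
  const HeapWord* top;     // end of the allocated part
  const HeapWord* end;     // end of the region

  // Mirrors HeapRegion::block_is_obj: only addresses below top hold objects.
  bool block_is_obj(const HeapWord* p) const { return p < top; }

  // Mirrors HeapRegion::block_size: object size below top, otherwise the
  // one block spanning [top, end). Here an object's size is encoded in its
  // first word (an assumption for this sketch only).
  size_t block_size(const HeapWord* p) const {
    if (p < top) return *p;
    assert(p == top && "just checking");
    return static_cast<size_t>(end - p);
  }
};

int main() {
  // Three "objects" of sizes 2, 3 and 1, then free space up to end.
  HeapWord heap[10] = {2, 0, 3, 0, 0, 1, 0, 0, 0, 0};
  Region r = {heap, heap + 6, heap + 10};

  for (const HeapWord* p = r.bottom; p < r.end; p += r.block_size(p)) {
    std::cout << (r.block_is_obj(p) ? "object" : "free block")
              << " of size " << r.block_size(p) << '\n';
  }
  return 0;
}

This is why the patch can replace obj->size() with block_size(p) throughout heapRegion.cpp: the walk no longer depends on the tail being parseable as objects.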
--- old/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp 2014-06-23 16:11:25.057358428 +0200
+++ new/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp 2014-06-23 16:11:24.953354219 +0200
@@ -25,8 +25,44 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 
+#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/space.hpp"
+#include "runtime/atomic.inline.hpp"
+
+inline HeapWord* G1OffsetTableContigSpace::cas_allocate_inner(size_t size) {
+  HeapWord* obj = top();
+  do {
+    if (pointer_delta(end(), obj) >= size) {
+      HeapWord* new_top = obj + size;
+      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, &_top, obj);
+      if (result == obj) {
+        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+        return obj;
+      }
+      obj = result;
+    } else {
+      break;
+    }
+  } while (true);
+  return NULL;
+}
+
+inline HeapWord* G1OffsetTableContigSpace::allocate_inner(size_t size) {
+  HeapWord* obj = top();
+  if (pointer_delta(end(), obj) >= size) {
+    HeapWord* new_top = obj + size;
+    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+    set_top(new_top);
+    return obj;
+  }
+  return NULL;
+}
+
+
 inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
-  HeapWord* res = ContiguousSpace::allocate(size);
+  HeapWord* res = allocate_inner(size);
   if (res != NULL) {
     _offsets.alloc_block(res, size);
   }
@@ -38,12 +74,7 @@
 // this is used for larger LAB allocations only.
 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
   MutexLocker x(&_par_alloc_lock);
-  // Given that we take the lock no need to use par_allocate() here.
-  HeapWord* res = ContiguousSpace::allocate(size);
-  if (res != NULL) {
-    _offsets.alloc_block(res, size);
-  }
-  return res;
+  return allocate(size);
 }
 
 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
@@ -55,6 +86,32 @@
   return _offsets.block_start_const(p);
 }
 
+inline bool
+HeapRegion::block_is_obj(const HeapWord* p) const {
+  return p < top();
+}
+
+inline size_t
+HeapRegion::block_size(const HeapWord *addr) const {
+  const HeapWord* current_top = top();
+  if (addr < current_top) {
+    return oop(addr)->size();
+  } else {
+    assert(addr == current_top, "just checking");
+    return pointer_delta(end(), addr);
+  }
+}
+
+inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return cas_allocate_inner(word_size);
+}
+
+inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return allocate_inner(word_size);
+}
+
 inline void HeapRegion::note_start_of_marking() {
   _next_marked_bytes = 0;
   _next_top_at_mark_start = top();
--- old/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp 2014-06-23 16:11:25.737385956 +0200
+++ new/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp 2014-06-23 16:11:25.629381584 +0200
@@ -69,7 +69,8 @@
                                                                               \
   declare_type(G1CollectedHeap, SharedHeap)                                   \
                                                                               \
-  declare_type(HeapRegion, ContiguousSpace)                                   \
+  declare_type(G1OffsetTableContigSpace, CompactibleSpace)                    \
+  declare_type(HeapRegion, G1OffsetTableContigSpace)                          \
   declare_toplevel_type(HeapRegionSeq)                                        \
   declare_toplevel_type(HeapRegionSetBase)                                    \
   declare_toplevel_type(HeapRegionSetCount)                                   \
--- old/src/share/vm/memory/space.cpp 2014-06-23 16:11:26.453414941 +0200
+++ new/src/share/vm/memory/space.cpp 2014-06-23 16:11:26.349410731 +0200
@@ -684,14 +684,8 @@
 // This version requires locking.
 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                 HeapWord* const end_value) {
-  // In G1 there are places where a GC worker can allocates into a
-  // region using this serial allocation code without being prone to a
-  // race with other GC workers (we ensure that no other GC worker can
-  // access the same region at the same time). So the assert below is
-  // too strong in the case of G1.
   assert(Heap_lock->owned_by_self() ||
-         (SafepointSynchronize::is_at_safepoint() &&
-          (Thread::current()->is_VM_thread() || UseG1GC)),
+         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
          "not locked");
   HeapWord* obj = top();
   if (pointer_delta(end_value, obj) >= size) {
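Note (illustration, not part of the patch): cas_allocate_inner above is a classic lock-free bump-pointer loop — try to CAS top() forward, and on failure retry from the value the winning thread installed. A standalone sketch of the same loop using std::atomic in place of Atomic::cmpxchg_ptr follows; SpaceSketch and its members are assumptions for the sketch, not HotSpot types.

// Standalone sketch of the CAS bump-pointer allocation loop.
#include <atomic>
#include <cstddef>
#include <iostream>

class SpaceSketch {
  char* _end;                  // exclusive limit of the space
  std::atomic<char*> _top;     // current allocation pointer
 public:
  SpaceSketch(char* bottom, char* end) : _end(end), _top(bottom) {}

  char* cas_allocate(size_t size_in_bytes) {
    char* obj = _top.load();
    while (static_cast<size_t>(_end - obj) >= size_in_bytes) {
      char* new_top = obj + size_in_bytes;
      // On failure, compare_exchange stores the value the winner installed
      // into obj, so the loop re-checks the remaining space and retries.
      if (_top.compare_exchange_weak(obj, new_top)) {
        return obj;            // we own [obj, new_top)
      }
    }
    return NULL;               // not enough room left
  }
};

int main() {
  char buffer[64];
  SpaceSketch s(buffer, buffer + sizeof(buffer));
  std::cout << (s.cas_allocate(48) != NULL) << '\n';  // 1: fits
  std::cout << (s.cas_allocate(32) != NULL) << '\n';  // 0: only 16 bytes left
  return 0;
}

As in the patch, a failed CAS hands back the updated top so the retry re-checks the remaining space before trying again. The serial allocate_inner path skips the CAS entirely because its callers guarantee exclusive access (for example, par_allocate holds _par_alloc_lock), which is also why the patch can drop the G1-specific relaxation of the locking assert in ContiguousSpace::allocate_impl.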