--- old/src/hotspot/share/aot/aotCodeHeap.cpp 2019-11-06 20:18:57.689016218 +0100 +++ new/src/hotspot/share/aot/aotCodeHeap.cpp 2019-11-06 20:18:57.465014169 +0100 @@ -34,6 +34,7 @@ #include "interpreter/abstractInterpreter.hpp" #include "jvmci/compilerRuntime.hpp" #include "jvmci/jvmciRuntime.hpp" +#include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "memory/universe.hpp" #include "oops/compressedOops.hpp" --- old/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp 2019-11-06 20:18:58.409022801 +0100 +++ new/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp 2019-11-06 20:18:58.205020936 +0100 @@ -32,7 +32,7 @@ #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/space.inline.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "logging/log.hpp" #include "logging/logStream.hpp" #include "memory/allocation.inline.hpp" --- old/src/hotspot/share/gc/cms/parNewGeneration.cpp 2019-11-06 20:18:59.149029563 +0100 +++ new/src/hotspot/share/gc/cms/parNewGeneration.cpp 2019-11-06 20:18:58.941027662 +0100 @@ -44,7 +44,7 @@ #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" #include "gc/shared/space.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "gc/shared/weakProcessor.hpp" --- old/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp 2019-11-06 20:18:59.889036320 +0100 +++ new/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp 2019-11-06 20:18:59.669034312 +0100 @@ -25,8 +25,7 @@ #include "precompiled.hpp" #include "gc/g1/g1BlockOffsetTable.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/g1/heapRegion.hpp" -#include "gc/shared/space.hpp" +#include "gc/g1/heapRegion.inline.hpp" #include "logging/log.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" @@ -74,12 +73,12 @@ // G1BlockOffsetTablePart ////////////////////////////////////////////////////////////////////// -G1BlockOffsetTablePart::G1BlockOffsetTablePart(G1BlockOffsetTable* array, G1ContiguousSpace* gsp) : +G1BlockOffsetTablePart::G1BlockOffsetTablePart(G1BlockOffsetTable* array, HeapRegion* hr) : _next_offset_threshold(NULL), _next_offset_index(0), DEBUG_ONLY(_object_can_span(false) COMMA) _bot(array), - _space(gsp) + _hr(hr) { } @@ -141,7 +140,7 @@ if (start_card > end_card) { return; } - assert(start_card > _bot->index_for(_space->bottom()), "Cannot be first card"); + assert(start_card > _bot->index_for(_hr->bottom()), "Cannot be first card"); assert(_bot->offset_array(start_card-1) <= BOTConstants::N_words, "Offset card has an unexpected value"); size_t start_card_for_region = start_card; @@ -224,7 +223,7 @@ "next_boundary is beyond the end of the covered region " " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT, p2i(next_boundary), p2i(_bot->_reserved.end())); - if (addr >= _space->top()) return _space->top(); + if (addr >= _hr->top()) return _hr->top(); while (next_boundary < addr) { while (n <= next_boundary) { q = n; @@ -326,9 +325,9 @@ } void G1BlockOffsetTablePart::verify() const { - assert(_space->bottom() < _space->top(), "Only non-empty regions should be verified."); - size_t start_card = _bot->index_for(_space->bottom()); - size_t end_card = _bot->index_for(_space->top() - 1); + assert(_hr->bottom() < _hr->top(), "Only non-empty regions should be verified."); + size_t start_card = 
_bot->index_for(_hr->bottom()); + size_t end_card = _bot->index_for(_hr->top() - 1); for (size_t current_card = start_card; current_card < end_card; current_card++) { u_char entry = _bot->offset_array(current_card); @@ -342,9 +341,9 @@ HeapWord* obj = obj_end; size_t obj_size = block_size(obj); obj_end = obj + obj_size; - guarantee(obj_end > obj && obj_end <= _space->top(), + guarantee(obj_end > obj && obj_end <= _hr->top(), "Invalid object end. obj: " PTR_FORMAT " obj_size: " SIZE_FORMAT " obj_end: " PTR_FORMAT " top: " PTR_FORMAT, - p2i(obj), obj_size, p2i(obj_end), p2i(_space->top())); + p2i(obj), obj_size, p2i(obj_end), p2i(_hr->top())); } } else { // Because we refine the BOT based on which cards are dirty there is not much we can verify here. @@ -359,9 +358,9 @@ start_card, current_card, backskip); HeapWord* backskip_address = _bot->address_for_index(current_card - backskip); - guarantee(backskip_address >= _space->bottom(), + guarantee(backskip_address >= _hr->bottom(), "Going backwards beyond bottom of the region: bottom: " PTR_FORMAT ", backskip_address: " PTR_FORMAT, - p2i(_space->bottom()), p2i(backskip_address)); + p2i(_hr->bottom()), p2i(backskip_address)); } } } @@ -373,13 +372,12 @@ #endif #ifndef PRODUCT -void -G1BlockOffsetTablePart::print_on(outputStream* out) { - size_t from_index = _bot->index_for(_space->bottom()); - size_t to_index = _bot->index_for(_space->end()); +void G1BlockOffsetTablePart::print_on(outputStream* out) { + size_t from_index = _bot->index_for(_hr->bottom()); + size_t to_index = _bot->index_for(_hr->end()); out->print_cr(">> BOT for area [" PTR_FORMAT "," PTR_FORMAT ") " "cards [" SIZE_FORMAT "," SIZE_FORMAT ")", - p2i(_space->bottom()), p2i(_space->end()), from_index, to_index); + p2i(_hr->bottom()), p2i(_hr->end()), from_index, to_index); for (size_t i = from_index; i < to_index; ++i) { out->print_cr(" entry " SIZE_FORMAT_W(8) " | " PTR_FORMAT " : %3u", i, p2i(_bot->address_for_index(i)), @@ -391,7 +389,7 @@ #endif // !PRODUCT HeapWord* G1BlockOffsetTablePart::initialize_threshold_raw() { - _next_offset_index = _bot->index_for_raw(_space->bottom()); + _next_offset_index = _bot->index_for_raw(_hr->bottom()); _next_offset_index++; _next_offset_threshold = _bot->address_for_index_raw(_next_offset_index); @@ -399,14 +397,14 @@ } void G1BlockOffsetTablePart::zero_bottom_entry_raw() { - size_t bottom_index = _bot->index_for_raw(_space->bottom()); - assert(_bot->address_for_index_raw(bottom_index) == _space->bottom(), + size_t bottom_index = _bot->index_for_raw(_hr->bottom()); + assert(_bot->address_for_index_raw(bottom_index) == _hr->bottom(), "Precondition of call"); _bot->set_offset_array_raw(bottom_index, 0); } HeapWord* G1BlockOffsetTablePart::initialize_threshold() { - _next_offset_index = _bot->index_for(_space->bottom()); + _next_offset_index = _bot->index_for(_hr->bottom()); _next_offset_index++; _next_offset_threshold = _bot->address_for_index(_next_offset_index); @@ -416,7 +414,7 @@ void G1BlockOffsetTablePart::set_for_starts_humongous(HeapWord* obj_top, size_t fill_size) { // The first BOT entry should have offset 0. 
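// [Reviewer note, not part of this patch] The remainder of set_for_starts_humongous() below
// rebuilds the BOT for a starts-humongous region: reset_bot() clears it, alloc_block(bottom, obj_top)
// records the humongous object as a single block, and a trailing filler block is recorded only when
// fill_size > 0. The hunk itself is just the _space -> _hr rename.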
reset_bot(); - alloc_block(_space->bottom(), obj_top); + alloc_block(_hr->bottom(), obj_top); if (fill_size > 0) { alloc_block(obj_top, fill_size); } --- old/src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp 2019-11-06 20:19:00.593042746 +0100 +++ new/src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp 2019-11-06 20:19:00.389040884 +0100 @@ -33,14 +33,14 @@ // Forward declarations class G1BlockOffsetTable; -class G1ContiguousSpace; +class HeapRegion; // This implementation of "G1BlockOffsetTable" divides the covered region // into "N"-word subregions (where "N" = 2^"LogN". An array with an entry // for each such subregion indicates how far back one must go to find the // start of the chunk that includes the first word of the subregion. // -// Each G1BlockOffsetTablePart is owned by a G1ContiguousSpace. +// Each G1BlockOffsetTablePart is owned by a HeapRegion. class G1BlockOffsetTable: public CHeapObj { friend class G1BlockOffsetTablePart; @@ -120,8 +120,8 @@ // This is the global BlockOffsetTable. G1BlockOffsetTable* _bot; - // The space that owns this subregion. - G1ContiguousSpace* _space; + // The region that owns this subregion. + HeapRegion* _hr; // Sets the entries // corresponding to the cards starting at "start" and ending at "end" @@ -183,7 +183,7 @@ public: // The elements of the array are initialized to zero. - G1BlockOffsetTablePart(G1BlockOffsetTable* array, G1ContiguousSpace* gsp); + G1BlockOffsetTablePart(G1BlockOffsetTable* array, HeapRegion* hr); void verify() const; --- old/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp 2019-11-06 20:19:01.277048985 +0100 +++ new/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp 2019-11-06 20:19:01.069047088 +0100 @@ -28,11 +28,10 @@ #include "gc/g1/g1BlockOffsetTable.hpp" #include "gc/g1/heapRegion.hpp" #include "gc/shared/memset_with_concurrent_readers.hpp" -#include "gc/shared/space.hpp" #include "runtime/atomic.hpp" inline HeapWord* G1BlockOffsetTablePart::block_start(const void* addr) { - if (addr >= _space->bottom() && addr < _space->end()) { + if (addr >= _hr->bottom() && addr < _hr->end()) { HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1); return forward_to_block_containing_addr(q, addr); } else { @@ -41,7 +40,7 @@ } inline HeapWord* G1BlockOffsetTablePart::block_start_const(const void* addr) const { - if (addr >= _space->bottom() && addr < _space->end()) { + if (addr >= _hr->bottom() && addr < _hr->end()) { HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1); HeapWord* n = q + block_size(q); return forward_to_block_containing_addr_const(q, n, addr); @@ -107,15 +106,15 @@ } inline size_t G1BlockOffsetTablePart::block_size(const HeapWord* p) const { - return _space->block_size(p); + return _hr->block_size(p); } inline HeapWord* G1BlockOffsetTablePart::block_at_or_preceding(const void* addr, bool has_max_index, size_t max_index) const { - assert(_object_can_span || _bot->offset_array(_bot->index_for(_space->bottom())) == 0, + assert(_object_can_span || _bot->offset_array(_bot->index_for(_hr->bottom())) == 0, "Object crossed region boundary, found offset %u instead of 0", - (uint) _bot->offset_array(_bot->index_for(_space->bottom()))); + (uint) _bot->offset_array(_bot->index_for(_hr->bottom()))); size_t index = _bot->index_for(addr); // We must make sure that the offset table entry we use is valid. If // "addr" is past the end, start at the last known one and go forward. 
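// [Reviewer note, not part of this patch] Throughout g1BlockOffsetTable.{cpp,hpp,inline.hpp} the old
// _space member was only used for bounds and size queries (bottom(), end(), top(), block_size()),
// so retargeting the back-pointer to the owning HeapRegion as _hr is a mechanical rename; the BOT
// lookup and verification logic is unchanged, and gc/shared/space.hpp is no longer needed here.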
@@ -140,7 +139,7 @@ inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n, const void* addr) const { - if (addr >= _space->top()) return _space->top(); + if (addr >= _hr->top()) return _hr->top(); while (n <= addr) { q = n; oop obj = oop(q); --- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2019-11-06 20:19:01.957055184 +0100 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2019-11-06 20:19:01.753053324 +0100 @@ -664,8 +664,6 @@ next_region = NULL; } curr_region->set_top(top); - curr_region->set_first_dead(top); - curr_region->set_end_of_live(top); curr_region = next_region; } @@ -3978,7 +3976,7 @@ } if (!r->evacuation_failed()) { - assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index()); + assert(!r->is_empty(), "Region %u is an empty region in the collection set.", r->hrm_index()); _before_used_bytes += r->used(); g1h->free_region(r, &_local_free_list, --- old/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp 2019-11-06 20:19:02.745062363 +0100 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp 2019-11-06 20:19:02.529060395 +0100 @@ -32,6 +32,7 @@ #include "gc/g1/heapRegionSet.hpp" #include "gc/shared/taskqueue.hpp" #include "gc/shared/verifyOption.hpp" +#include "gc/shared/workgroup.hpp" #include "memory/allocation.hpp" #include "utilities/compilerWarnings.hpp" --- old/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp 2019-11-06 20:19:03.477069027 +0100 +++ new/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp 2019-11-06 20:19:03.257067025 +0100 @@ -28,6 +28,7 @@ #include "gc/g1/g1YoungGenSizer.hpp" #include "gc/g1/heapRegion.hpp" #include "logging/log.hpp" +#include "runtime/globals_extension.hpp" G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _use_adaptive_sizing(true), _min_desired_young_length(0), _max_desired_young_length(0) { --- old/src/hotspot/share/gc/g1/heapRegion.cpp 2019-11-06 20:19:04.149075142 +0100 +++ new/src/hotspot/share/gc/g1/heapRegion.cpp 2019-11-06 20:19:03.929073142 +0100 @@ -35,7 +35,6 @@ #include "gc/g1/heapRegionRemSet.hpp" #include "gc/g1/heapRegionTracer.hpp" #include "gc/shared/genOopClosures.inline.hpp" -#include "gc/shared/space.inline.hpp" #include "logging/log.hpp" #include "logging/logStream.hpp" #include "memory/iterator.inline.hpp" @@ -45,7 +44,6 @@ #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/orderAccess.hpp" -#include "utilities/growableArray.hpp" int HeapRegion::LogOfHRGrainBytes = 0; int HeapRegion::LogOfHRGrainWords = 0; @@ -234,21 +232,27 @@ HeapRegion::HeapRegion(uint hrm_index, G1BlockOffsetTable* bot, MemRegion mr) : - G1ContiguousSpace(bot), - _rem_set(NULL), - _hrm_index(hrm_index), - _type(), - _humongous_start_region(NULL), - _evacuation_failed(false), - _next(NULL), _prev(NULL), + _bottom(NULL), + _end(NULL), + _top(NULL), + _compaction_top(NULL), + _bot_part(bot, this), + _par_alloc_lock(Mutex::leaf, "HeapRegion par alloc lock", true), + _pre_dummy_top(NULL), + _rem_set(NULL), + _hrm_index(hrm_index), + _type(), + _humongous_start_region(NULL), + _evacuation_failed(false), + _next(NULL), _prev(NULL), #ifdef ASSERT - _containing_set(NULL), + _containing_set(NULL), #endif - _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0), - _index_in_opt_cset(InvalidCSetIndex), _young_index_in_cset(-1), - _surv_rate_group(NULL), _age_index(-1), - _prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL), - _recorded_rs_length(0), _predicted_elapsed_time_ms(0) + _prev_marked_bytes(0), _next_marked_bytes(0), 
_gc_efficiency(0.0), + _index_in_opt_cset(InvalidCSetIndex), _young_index_in_cset(-1), + _surv_rate_group(NULL), _age_index(-1), + _prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL), + _recorded_rs_length(0), _predicted_elapsed_time_ms(0) { _rem_set = new HeapRegionRemSet(bot, this); @@ -258,10 +262,20 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { assert(_rem_set->is_empty(), "Remembered set must be empty"); - G1ContiguousSpace::initialize(mr, clear_space, mangle_space); + assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()), + "invalid space boundaries"); + + set_bottom(mr.start()); + set_end(mr.end()); + if (clear_space) { + clear(mangle_space); + } - hr_clear(false /*par*/, false /*clear_space*/); set_top(bottom()); + set_compaction_top(bottom()); + reset_bot(); + + hr_clear(false /*par*/, false /*clear_space*/); } void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) { @@ -444,6 +458,7 @@ } void HeapRegion::print() const { print_on(tty); } + void HeapRegion::print_on(outputStream* st) const { st->print("|%4u", this->_hrm_index); st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT, @@ -636,9 +651,6 @@ debug_only(virtual bool should_verify_oops() { return false; }) }; -// This really ought to be commoned up into OffsetTableContigSpace somehow. -// We would need a mechanism to make that code skip dead objects. - void HeapRegion::verify(VerifyOption vo, bool* failures) const { G1CollectedHeap* g1h = G1CollectedHeap::heap(); @@ -828,51 +840,32 @@ guarantee(!failures, "HeapRegion RemSet verification failed"); } -void HeapRegion::prepare_for_compaction(CompactPoint* cp) { - // Not used for G1 anymore, but pure virtual in Space. - ShouldNotReachHere(); -} - -// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go -// away eventually. 
- -void G1ContiguousSpace::clear(bool mangle_space) { +void HeapRegion::clear(bool mangle_space) { set_top(bottom()); - CompactibleSpace::clear(mangle_space); + set_compaction_top(bottom()); + + if (ZapUnusedHeapArea && mangle_space) { + mangle_unused_area(); + } reset_bot(); } -#ifndef PRODUCT -void G1ContiguousSpace::mangle_unused_area() { - mangle_unused_area_complete(); -} -void G1ContiguousSpace::mangle_unused_area_complete() { +#ifndef PRODUCT +void HeapRegion::mangle_unused_area() { SpaceMangler::mangle_region(MemRegion(top(), end())); } #endif -void G1ContiguousSpace::print() const { - print_short(); - tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " - INTPTR_FORMAT ", " INTPTR_FORMAT ")", - p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end())); -} - -HeapWord* G1ContiguousSpace::initialize_threshold() { +HeapWord* HeapRegion::initialize_threshold() { return _bot_part.initialize_threshold(); } -HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start, - HeapWord* end) { +HeapWord* HeapRegion::cross_threshold(HeapWord* start, HeapWord* end) { _bot_part.alloc_block(start, end); return _bot_part.threshold(); } -void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) { - object_iterate(blk); -} - -void G1ContiguousSpace::object_iterate(ObjectClosure* blk) { +void HeapRegion::object_iterate(ObjectClosure* blk) { HeapWord* p = bottom(); while (p < top()) { if (block_is_obj(p)) { @@ -881,18 +874,3 @@ p += block_size(p); } } - -G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) : - _top(NULL), - _bot_part(bot, this), - _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true), - _pre_dummy_top(NULL) -{ -} - -void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) { - CompactibleSpace::initialize(mr, clear_space, mangle_space); - _top = bottom(); - set_saved_mark_word(NULL); - reset_bot(); -} --- old/src/hotspot/share/gc/g1/heapRegion.hpp 2019-11-06 20:19:04.865081653 +0100 +++ new/src/hotspot/share/gc/g1/heapRegion.hpp 2019-11-06 20:19:04.653079725 +0100 @@ -31,34 +31,13 @@ #include "gc/g1/heapRegionType.hpp" #include "gc/g1/survRateGroup.hpp" #include "gc/shared/ageTable.hpp" -#include "gc/shared/cardTable.hpp" -#include "gc/shared/verifyOption.hpp" #include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/verifyOption.hpp" +#include "runtime/mutex.hpp" #include "utilities/macros.hpp" -// A HeapRegion is the smallest piece of a G1CollectedHeap that -// can be collected independently. - -// NOTE: Although a HeapRegion is a Space, its -// Space::initDirtyCardClosure method must not be called. -// The problem is that the existence of this method breaks -// the independence of barrier sets from remembered sets. -// The solution is to remove this method from the definition -// of a Space. - -// Each heap region is self contained. top() and end() can never -// be set beyond the end of the region. For humongous objects, -// the first region is a StartsHumongous region. If the humongous -// object is larger than a heap region, the following regions will -// be of type ContinuesHumongous. In this case the top() of the -// StartHumongous region and all ContinuesHumongous regions except -// the last will point to their own end. The last ContinuesHumongous -// region may have top() equal the end of object if there isn't -// room for filler objects to pad out to the end of the region. 
- class G1CollectedHeap; class G1CMBitMap; -class G1IsAliveAndApplyClosure; class HeapRegionRemSet; class HeapRegion; class HeapRegionSetBase; @@ -73,31 +52,27 @@ // sentinel value for hrm_index #define G1_NO_HRM_INDEX ((uint) -1) -// The complicating factor is that BlockOffsetTable diverged -// significantly, and we need functionality that is only in the G1 version. -// So I copied that code, which led to an alternate G1 version of -// OffsetTableContigSpace. If the two versions of BlockOffsetTable could -// be reconciled, then G1OffsetTableContigSpace could go away. - -// The idea behind time stamps is the following. We want to keep track of -// the highest address where it's safe to scan objects for each region. -// This is only relevant for current GC alloc regions so we keep a time stamp -// per region to determine if the region has been allocated during the current -// GC or not. If the time stamp is current we report a scan_top value which -// was saved at the end of the previous GC for retained alloc regions and which is -// equal to the bottom for all other regions. -// There is a race between card scanners and allocating gc workers where we must ensure -// that card scanners do not read the memory allocated by the gc workers. -// In order to enforce that, we must not return a value of _top which is more recent than the -// time stamp. This is due to the fact that a region may become a gc alloc region at -// some point after we've read the timestamp value as being < the current time stamp. -// The time stamps are re-initialized to zero at cleanup and at Full GCs. -// The current scheme that uses sequential unsigned ints will fail only if we have 4b -// evacuation pauses between two cleanups, which is _highly_ unlikely. -class G1ContiguousSpace: public CompactibleSpace { +// A HeapRegion is the smallest piece of a G1CollectedHeap that +// can be collected independently. + +// Each heap region is self contained. top() and end() can never +// be set beyond the end of the region. For humongous objects, +// the first region is a StartsHumongous region. If the humongous +// object is larger than a heap region, the following regions will +// be of type ContinuesHumongous. In this case the top() of the +// StartHumongous region and all ContinuesHumongous regions except +// the last will point to their own end. The last ContinuesHumongous +// region may have top() equal the end of object if there isn't +// room for filler objects to pad out to the end of the region. +class HeapRegion : public CHeapObj { friend class VMStructs; + + HeapWord* _bottom; + HeapWord* _end; + HeapWord* volatile _top; - protected: + HeapWord* _compaction_top; + G1BlockOffsetTablePart _bot_part; Mutex _par_alloc_lock; // When we need to retire an allocation region, while other threads @@ -108,43 +83,57 @@ // into the region was and this is what this keeps track. HeapWord* _pre_dummy_top; - public: - G1ContiguousSpace(G1BlockOffsetTable* bot); +public: + void set_bottom(HeapWord* value) { _bottom = value; } + HeapWord* bottom() const { return _bottom; } + + void set_end(HeapWord* value) { _end = value; } + HeapWord* end() const { return _end; } + + void set_compaction_top(HeapWord* compaction_top) { _compaction_top = compaction_top; } + HeapWord* compaction_top() const { return _compaction_top; } void set_top(HeapWord* value) { _top = value; } HeapWord* top() const { return _top; } - protected: - // Reset the G1ContiguousSpace. 
- virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); + // Returns true iff the given the heap region contains the + // given address as part of an allocated object. This may + // be a potentially, so we restrict its use to assertion checks only. + bool is_in(const void* p) const { + return is_in_reserved(p); + } + bool is_in(oop obj) const { + return is_in((void*)obj); + } + // Returns true iff the given reserved memory of the space contains the + // given address. + bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; } + + size_t capacity() const { return byte_size(bottom(), end()); } + size_t used() const { return byte_size(bottom(), top()); } + size_t free() const { return byte_size(top(), end()); } + + bool is_empty() const { return used() == 0; } + +private: + void reset_after_compaction() { set_top(compaction_top()); } - HeapWord* volatile* top_addr() { return &_top; } - // Try to allocate at least min_word_size and up to desired_size from this Space. + // Try to allocate at least min_word_size and up to desired_size from this region. // Returns NULL if not possible, otherwise sets actual_word_size to the amount of // space allocated. - // This version assumes that all allocation requests to this Space are properly + // This version assumes that all allocation requests to this HeapRegion are properly // synchronized. inline HeapWord* allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); - // Try to allocate at least min_word_size and up to desired_size from this Space. + // Try to allocate at least min_word_size and up to desired_size from this HeapRegion. // Returns NULL if not possible, otherwise sets actual_word_size to the amount of // space allocated. // This version synchronizes with other calls to par_allocate_impl(). inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); - public: - void reset_after_compaction() { set_top(compaction_top()); } - - size_t used() const { return byte_size(bottom(), top()); } - size_t free() const { return byte_size(top(), end()); } - bool is_free_block(const HeapWord* p) const { return p >= top(); } - - MemRegion used_region() const { return MemRegion(bottom(), top()); } + void mangle_unused_area() PRODUCT_RETURN; +public: void object_iterate(ObjectClosure* blk); - void safe_object_iterate(ObjectClosure* blk); - - void mangle_unused_area() PRODUCT_RETURN; - void mangle_unused_area_complete() PRODUCT_RETURN; // See the comment above in the declaration of _pre_dummy_top for an // explanation of what it is. @@ -152,32 +141,31 @@ assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition"); _pre_dummy_top = pre_dummy_top; } + HeapWord* pre_dummy_top() { return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; } void reset_pre_dummy_top() { _pre_dummy_top = NULL; } - virtual void clear(bool mangle_space); + void clear(bool mangle_space); HeapWord* block_start(const void* p); HeapWord* block_start_const(const void* p) const; // Allocation (return NULL if full). Assumes the caller has established - // mutually exclusive access to the space. + // mutually exclusive access to the HeapRegion. HeapWord* allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); // Allocation (return NULL if full). Enforces mutual exclusion internally. 
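// [Reviewer note, not part of this patch] "Enforces mutual exclusion internally" is implemented in
// heapRegion.inline.hpp further down: the three-argument par_allocate() takes _par_alloc_lock and
// then delegates to allocate(), while the lock-free CAS path is the private par_allocate_impl()
// declared above.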
HeapWord* par_allocate(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size); - virtual HeapWord* allocate(size_t word_size); - virtual HeapWord* par_allocate(size_t word_size); + HeapWord* allocate(size_t word_size); + HeapWord* par_allocate(size_t word_size); HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; } // MarkSweep support phase3 - virtual HeapWord* initialize_threshold(); - virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end); - - virtual void print() const; + HeapWord* initialize_threshold(); + HeapWord* cross_threshold(HeapWord* start, HeapWord* end); void reset_bot() { _bot_part.reset_bot(); @@ -186,34 +174,11 @@ void print_bot_on(outputStream* out) { _bot_part.print_on(out); } -}; - -class HeapRegion: public G1ContiguousSpace { - friend class VMStructs; - // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class - template - friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp); - private: +private: // The remembered set for this region. - // (Might want to make this "inline" later, to avoid some alloc failure - // issues.) HeapRegionRemSet* _rem_set; - // Auxiliary functions for scan_and_forward support. - // See comments for CompactibleSpace for more information. - inline HeapWord* scan_limit() const { - return top(); - } - - inline bool scanned_block_is_obj(const HeapWord* addr) const { - return true; // Always true, since scan_limit is top - } - - inline size_t scanned_block_size(const HeapWord* addr) const { - return HeapRegion::block_size(addr); // Avoid virtual call - } - void report_region_type_change(G1HeapRegionTraceType::Type to); // Returns whether the given object address refers to a dead object, and either the @@ -223,7 +188,6 @@ // - not called on humongous objects or archive regions inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const; - protected: // The index of this region in the heap region sequence. uint _hrm_index; @@ -269,8 +233,6 @@ // "next" is the top at the start of the in-progress marking (if any.) HeapWord* _prev_top_at_mark_start; HeapWord* _next_top_at_mark_start; - // If a collection pause is in progress, this is the top at the start - // of that pause. void init_top_at_mark_start() { assert(_prev_marked_bytes == 0 && @@ -306,16 +268,14 @@ // Returns the block size of the given (dead, potentially having its class unloaded) object // starting at p extending to at most the prev TAMS using the given mark bitmap. inline size_t block_size_using_bitmap(const HeapWord* p, const G1CMBitMap* const prev_bitmap) const; - public: - HeapRegion(uint hrm_index, - G1BlockOffsetTable* bot, - MemRegion mr); +public: + HeapRegion(uint hrm_index, G1BlockOffsetTable* bot, MemRegion mr); // Initializing the HeapRegion not only resets the data structure, but also // resets the BOT for that heap region. // The default values for clear_space means that we will do the clearing if // there's clearing to be done ourselves. We also always mangle the space. - virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle); + void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle); static int LogOfHRGrainBytes; static int LogOfHRGrainWords; @@ -364,8 +324,6 @@ // objects to call size_t ApplyToMarkedClosure::apply(oop) for. 
template inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure); - // Override for scan_and_forward support. - void prepare_for_compaction(CompactPoint* cp); // Update heap region to be consistent after compaction. void complete_compaction(); @@ -707,8 +665,8 @@ // full GC. void verify(VerifyOption vo, bool *failures) const; - // Override; it uses the "prev" marking information - virtual void verify() const; + // Verify using the "prev" marking information + void verify() const; void verify_rem_set(VerifyOption vo, bool *failures) const; void verify_rem_set() const; @@ -724,7 +682,7 @@ bool _is_complete; void set_incomplete() { _is_complete = false; } - public: +public: HeapRegionClosure(): _is_complete(true) {} // Typically called on each region until it returns true. --- old/src/hotspot/share/gc/g1/heapRegion.inline.hpp 2019-11-06 20:19:05.585088197 +0100 +++ new/src/hotspot/share/gc/g1/heapRegion.inline.hpp 2019-11-06 20:19:05.385086380 +0100 @@ -29,22 +29,22 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp" #include "gc/g1/heapRegion.hpp" -#include "gc/shared/space.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/prefetch.inline.hpp" #include "utilities/align.hpp" +#include "utilities/globalDefinitions.hpp" -inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size, - size_t desired_word_size, - size_t* actual_size) { +inline HeapWord* HeapRegion::allocate_impl(size_t min_word_size, + size_t desired_word_size, + size_t* actual_size) { HeapWord* obj = top(); size_t available = pointer_delta(end(), obj); size_t want_to_allocate = MIN2(available, desired_word_size); if (want_to_allocate >= min_word_size) { HeapWord* new_top = obj + want_to_allocate; set_top(new_top); - assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); + assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment"); *actual_size = want_to_allocate; return obj; } else { @@ -52,21 +52,21 @@ } } -inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size, - size_t desired_word_size, - size_t* actual_size) { +inline HeapWord* HeapRegion::par_allocate_impl(size_t min_word_size, + size_t desired_word_size, + size_t* actual_size) { do { HeapWord* obj = top(); size_t available = pointer_delta(end(), obj); size_t want_to_allocate = MIN2(available, desired_word_size); if (want_to_allocate >= min_word_size) { HeapWord* new_top = obj + want_to_allocate; - HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj); + HeapWord* result = Atomic::cmpxchg(new_top, &_top, obj); // result can be one of two: // the old top value: the exchange succeeded // otherwise: the new value of the top is returned. 
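// [Reviewer note, not part of this patch] Atomic::cmpxchg(new_top, &_top, obj) publishes new_top
// only if _top still holds the value sampled into obj; when another thread wins the race, the
// surrounding do/while loop re-reads top() and retries, so concurrent callers never receive
// overlapping ranges. Apart from moving the function from G1ContiguousSpace to HeapRegion, this
// hunk only drops the top_addr() indirection in favour of &_top and switches the asserts to
// is_object_aligned().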
if (result == obj) { - assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); + assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment"); *actual_size = want_to_allocate; return obj; } @@ -76,9 +76,9 @@ } while (true); } -inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size, - size_t desired_word_size, - size_t* actual_size) { +inline HeapWord* HeapRegion::allocate(size_t min_word_size, + size_t desired_word_size, + size_t* actual_size) { HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size); if (res != NULL) { _bot_part.alloc_block(res, *actual_size); @@ -86,12 +86,12 @@ return res; } -inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) { +inline HeapWord* HeapRegion::allocate(size_t word_size) { size_t temp; return allocate(word_size, word_size, &temp); } -inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) { +inline HeapWord* HeapRegion::par_allocate(size_t word_size) { size_t temp; return par_allocate(word_size, word_size, &temp); } @@ -99,19 +99,18 @@ // Because of the requirement of keeping "_offsets" up to date with the // allocations, we sequentialize these with a lock. Therefore, best if // this is used for larger LAB allocations only. -inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size, - size_t desired_word_size, - size_t* actual_size) { +inline HeapWord* HeapRegion::par_allocate(size_t min_word_size, + size_t desired_word_size, + size_t* actual_size) { MutexLocker x(&_par_alloc_lock); return allocate(min_word_size, desired_word_size, actual_size); } -inline HeapWord* G1ContiguousSpace::block_start(const void* p) { +inline HeapWord* HeapRegion::block_start(const void* p) { return _bot_part.block_start(p); } -inline HeapWord* -G1ContiguousSpace::block_start_const(const void* p) const { +inline HeapWord* HeapRegion::block_start_const(const void* p) const { return _bot_part.block_start_const(p); } @@ -134,8 +133,7 @@ return obj_is_dead; } -inline bool -HeapRegion::block_is_obj(const HeapWord* p) const { +inline bool HeapRegion::block_is_obj(const HeapWord* p) const { G1CollectedHeap* g1h = G1CollectedHeap::heap(); if (!this->is_in(p)) { @@ -185,7 +183,7 @@ inline void HeapRegion::complete_compaction() { // Reset space and bot after compaction is complete if needed. 
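// [Reviewer note, not part of this patch] reset_after_compaction() is now a private HeapRegion
// helper that simply does set_top(compaction_top()). Replacing used_region().is_empty() with
// is_empty() is equivalent: used_region() was MemRegion(bottom(), top()), and is_empty() tests
// used() == 0, i.e. top() == bottom().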
reset_after_compaction(); - if (used_region().is_empty()) { + if (is_empty()) { reset_bot(); } @@ -202,7 +200,7 @@ template inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) { - HeapWord* limit = scan_limit(); + HeapWord* limit = top(); HeapWord* next_addr = bottom(); while (next_addr < limit) { --- old/src/hotspot/share/gc/g1/vmStructs_g1.hpp 2019-11-06 20:19:06.269094411 +0100 +++ new/src/hotspot/share/gc/g1/vmStructs_g1.hpp 2019-11-06 20:19:06.065092558 +0100 @@ -37,11 +37,14 @@ static_field(HeapRegion, GrainBytes, size_t) \ static_field(HeapRegion, LogOfHRGrainBytes, int) \ \ - nonstatic_field(HeapRegion, _type, HeapRegionType) \ + nonstatic_field(HeapRegion, _type, HeapRegionType) \ + nonstatic_field(HeapRegion, _bottom, HeapWord*) \ + nonstatic_field(HeapRegion, _top, HeapWord* volatile) \ + nonstatic_field(HeapRegion, _end, HeapWord*) \ + nonstatic_field(HeapRegion, _compaction_top, HeapWord*) \ \ nonstatic_field(HeapRegionType, _tag, HeapRegionType::Tag volatile) \ \ - nonstatic_field(G1ContiguousSpace, _top, HeapWord* volatile) \ \ nonstatic_field(G1HeapRegionTable, _base, address) \ nonstatic_field(G1HeapRegionTable, _length, size_t) \ @@ -94,8 +97,7 @@ \ declare_type(G1CollectedHeap, CollectedHeap) \ \ - declare_type(G1ContiguousSpace, CompactibleSpace) \ - declare_type(HeapRegion, G1ContiguousSpace) \ + declare_toplevel_type(HeapRegion) \ declare_toplevel_type(HeapRegionManager) \ declare_toplevel_type(HeapRegionSetBase) \ declare_toplevel_type(G1MonitoringSupport) \ --- old/src/hotspot/share/gc/parallel/asPSYoungGen.cpp 2019-11-06 20:19:06.937100476 +0100 +++ new/src/hotspot/share/gc/parallel/asPSYoungGen.cpp 2019-11-06 20:19:06.733098623 +0100 @@ -30,7 +30,7 @@ #include "gc/parallel/psYoungGen.hpp" #include "gc/shared/gcUtil.hpp" #include "gc/shared/genArguments.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "utilities/align.hpp" --- old/src/hotspot/share/gc/parallel/mutableSpace.cpp 2019-11-06 20:19:07.609106572 +0100 +++ new/src/hotspot/share/gc/parallel/mutableSpace.cpp 2019-11-06 20:19:07.409104758 +0100 @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "gc/parallel/mutableSpace.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "memory/iterator.inline.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" --- old/src/hotspot/share/gc/parallel/psMarkSweep.cpp 2019-11-06 20:19:08.285112703 +0100 +++ new/src/hotspot/share/gc/parallel/psMarkSweep.cpp 2019-11-06 20:19:08.077110817 +0100 @@ -48,7 +48,7 @@ #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "gc/shared/weakProcessor.hpp" #include "memory/universe.hpp" #include "logging/log.hpp" --- old/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp 2019-11-06 20:19:09.005119229 +0100 +++ new/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp 2019-11-06 20:19:08.785117235 +0100 @@ -31,7 +31,7 @@ #include "gc/parallel/psMarkSweepDecorator.hpp" #include "gc/parallel/psParallelCompact.inline.hpp" #include "gc/serial/markSweep.inline.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "memory/iterator.inline.hpp" #include "oops/oop.inline.hpp" #include 
"runtime/prefetch.inline.hpp" --- old/src/hotspot/share/gc/parallel/psOldGen.cpp 2019-11-06 20:19:09.709125605 +0100 +++ new/src/hotspot/share/gc/parallel/psOldGen.cpp 2019-11-06 20:19:09.505123758 +0100 @@ -33,7 +33,7 @@ #include "gc/parallel/psOldGen.hpp" #include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/gcLocker.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "logging/log.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" --- old/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2019-11-06 20:19:10.381131689 +0100 +++ new/src/hotspot/share/gc/parallel/psParallelCompact.cpp 2019-11-06 20:19:10.177129843 +0100 @@ -52,7 +52,7 @@ #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "gc/shared/weakProcessor.hpp" #include "gc/shared/workerPolicy.hpp" #include "gc/shared/workgroup.hpp" --- old/src/hotspot/share/gc/parallel/psScavenge.cpp 2019-11-06 20:19:11.157138710 +0100 +++ new/src/hotspot/share/gc/parallel/psScavenge.cpp 2019-11-06 20:19:10.945136793 +0100 @@ -48,7 +48,7 @@ #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" #include "gc/shared/scavengableNMethods.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "gc/shared/weakProcessor.hpp" #include "gc/shared/workerPolicy.hpp" #include "gc/shared/workgroup.hpp" --- old/src/hotspot/share/gc/parallel/psYoungGen.cpp 2019-11-06 20:19:11.861145076 +0100 +++ new/src/hotspot/share/gc/parallel/psYoungGen.cpp 2019-11-06 20:19:11.657143232 +0100 @@ -30,7 +30,7 @@ #include "gc/parallel/psYoungGen.hpp" #include "gc/shared/gcUtil.hpp" #include "gc/shared/genArguments.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "logging/log.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" --- old/src/hotspot/share/gc/serial/defNewGeneration.cpp 2019-11-06 20:19:12.581151582 +0100 +++ new/src/hotspot/share/gc/serial/defNewGeneration.cpp 2019-11-06 20:19:12.369149666 +0100 @@ -43,7 +43,7 @@ #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" #include "gc/shared/space.inline.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/weakProcessor.hpp" #include "logging/log.hpp" --- old/src/hotspot/share/gc/shared/generation.cpp 2019-11-06 20:19:13.289157976 +0100 +++ new/src/hotspot/share/gc/shared/generation.cpp 2019-11-06 20:19:13.065155955 +0100 @@ -35,7 +35,7 @@ #include "gc/shared/generation.hpp" #include "gc/shared/generationSpec.hpp" #include "gc/shared/space.inline.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" --- old/src/hotspot/share/gc/shared/space.cpp 2019-11-06 20:19:13.997164368 +0100 +++ new/src/hotspot/share/gc/shared/space.cpp 2019-11-06 20:19:13.769162310 +0100 @@ -31,7 +31,7 @@ #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/space.hpp" #include "gc/shared/space.inline.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "memory/iterator.inline.hpp" #include 
"memory/universe.hpp" #include "oops/oop.inline.hpp" --- old/src/hotspot/share/gc/shared/spaceDecorator.cpp 2019-11-06 20:19:14.725170934 +0100 +++ new/src/hotspot/share/gc/shared/spaceDecorator.cpp 2019-11-06 20:19:14.501168913 +0100 @@ -23,8 +23,9 @@ */ #include "precompiled.hpp" +#include "gc/parallel/mutableSpace.hpp" #include "gc/shared/space.inline.hpp" -#include "gc/shared/spaceDecorator.hpp" +#include "gc/shared/spaceDecorator.inline.hpp" #include "logging/log.hpp" #include "utilities/copy.hpp" --- old/src/hotspot/share/gc/shared/spaceDecorator.hpp 2019-11-06 20:19:15.401177028 +0100 +++ new/src/hotspot/share/gc/shared/spaceDecorator.hpp 2019-11-06 20:19:15.201175226 +0100 @@ -25,8 +25,8 @@ #ifndef SHARE_GC_SHARED_SPACEDECORATOR_HPP #define SHARE_GC_SHARED_SPACEDECORATOR_HPP -#include "gc/parallel/mutableSpace.hpp" -#include "gc/shared/space.hpp" +#include "memory/allocation.hpp" +#include "memory/memRegion.hpp" #include "utilities/globalDefinitions.hpp" class SpaceDecorator: public AllStatic { @@ -120,6 +120,7 @@ }; class ContiguousSpace; +class MutableSpace; // For use with GenCollectedHeap's class GenSpaceMangler: public SpaceMangler { @@ -127,8 +128,8 @@ ContiguousSpace* sp() { return _sp; } - HeapWord* top() const { return _sp->top(); } - HeapWord* end() const { return _sp->end(); } + HeapWord* top() const; + HeapWord* end() const; public: GenSpaceMangler(ContiguousSpace* sp) : SpaceMangler(), _sp(sp) {} @@ -140,8 +141,8 @@ MutableSpace* sp() { return _sp; } - HeapWord* top() const { return _sp->top(); } - HeapWord* end() const { return _sp->end(); } + HeapWord* top() const; + HeapWord* end() const; public: MutableSpaceMangler(MutableSpace* sp) : SpaceMangler(), _sp(sp) {} --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java 2019-11-06 20:19:16.109183409 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java 2019-11-06 20:19:15.881181355 +0100 @@ -30,11 +30,11 @@ import java.util.Observer; import sun.jvm.hotspot.debugger.Address; +import sun.jvm.hotspot.gc.g1.HeapRegionClosure; +import sun.jvm.hotspot.gc.g1.PrintRegionClosure; import sun.jvm.hotspot.gc.shared.CollectedHeap; import sun.jvm.hotspot.gc.shared.CollectedHeapName; import sun.jvm.hotspot.gc.shared.LiveRegionsClosure; -import sun.jvm.hotspot.gc.shared.PrintRegionClosure; -import sun.jvm.hotspot.gc.shared.SpaceClosure; import sun.jvm.hotspot.memory.MemRegion; import sun.jvm.hotspot.runtime.VM; import sun.jvm.hotspot.runtime.VMObjectFactory; @@ -126,11 +126,11 @@ return hrm().heapRegionIterator(); } - public void heapRegionIterate(SpaceClosure scl) { + public void heapRegionIterate(HeapRegionClosure hrcl) { Iterator iter = heapRegionIterator(); while (iter.hasNext()) { HeapRegion hr = iter.next(); - scl.doSpace(hr); + hrcl.doHeapRegion(hr); } } --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegion.java 2019-11-06 20:19:16.789189533 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegion.java 2019-11-06 20:19:16.585187696 +0100 @@ -45,9 +45,11 @@ // any of its fields but only iterate over it. 
public class HeapRegion extends CompactibleSpace implements LiveRegionsProvider { - // static int GrainBytes; - static private CIntegerField grainBytesField; + private static AddressField bottomField; static private AddressField topField; + private static AddressField endField; + + static private CIntegerField grainBytesField; private static long typeFieldOffset; private static long pointerSize; @@ -64,8 +66,11 @@ static private synchronized void initialize(TypeDataBase db) { Type type = db.lookupType("HeapRegion"); - grainBytesField = type.getCIntegerField("GrainBytes"); + bottomField = type.getAddressField("_bottom"); topField = type.getAddressField("_top"); + endField = type.getAddressField("_end"); + + grainBytesField = type.getCIntegerField("GrainBytes"); typeFieldOffset = type.getField("_type").getOffset(); pointerSize = db.lookupType("HeapRegion*").getSize(); @@ -82,9 +87,9 @@ type = (HeapRegionType)VMObjectFactory.newObject(HeapRegionType.class, typeAddr); } - public Address top() { - return topField.getValue(addr); - } + public Address bottom() { return bottomField.getValue(addr); } + public Address top() { return topField.getValue(addr); } + public Address end() { return endField.getValue(addr); } @Override public List getLiveRegions() { @@ -93,12 +98,16 @@ return res; } - @Override + /** Returns a subregion of the space containing all the objects in + the space. */ + public MemRegion usedRegion() { + return new MemRegion(bottom(), end()); + } + public long used() { return top().minus(bottom()); } - @Override public long free() { return end().minus(top()); } --- old/test/hotspot/gtest/gc/g1/test_g1HeapVerifier.cpp 2019-11-06 20:19:17.493195868 +0100 +++ new/test/hotspot/gtest/gc/g1/test_g1HeapVerifier.cpp 2019-11-06 20:19:17.285193998 +0100 @@ -26,6 +26,7 @@ #include "gc/g1/g1Arguments.hpp" #include "gc/g1/g1HeapVerifier.hpp" #include "logging/logConfiguration.hpp" +#include "logging/logTag.hpp" #include "logging/logTestFixture.hpp" #include "unittest.hpp" --- /dev/null 2019-11-06 16:17:24.858704995 +0100 +++ new/src/hotspot/share/gc/shared/spaceDecorator.inline.hpp 2019-11-06 20:19:17.949199972 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_GC_SHARED_SPACEDECORATOR_INLINE_HPP +#define SHARE_GC_SHARED_SPACEDECORATOR_INLINE_HPP + +#include "gc/parallel/mutableSpace.hpp" +#include "gc/shared/space.hpp" +#include "gc/shared/spaceDecorator.hpp" +#include "utilities/globalDefinitions.hpp" + +inline HeapWord* GenSpaceMangler::top() const { return _sp->top(); } +inline HeapWord* GenSpaceMangler::end() const { return _sp->end(); } + +inline HeapWord* MutableSpaceMangler::top() const { return _sp->top(); } +inline HeapWord* MutableSpaceMangler::end() const { return _sp->end(); } + +#endif // SHARE_GC_SHARED_SPACEDECORATOR_INLINE_HPP --- /dev/null 2019-11-06 16:17:24.858704995 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionClosure.java 2019-11-06 20:19:18.589205726 +0100 @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.gc.g1; + +public interface HeapRegionClosure { + public void doHeapRegion(HeapRegion hr); +} --- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/PrintRegionClosure.java 2019-11-06 20:19:19.537214245 +0100 +++ /dev/null 2019-11-06 16:17:24.858704995 +0100 @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.gc.shared; - -import java.io.PrintStream; -import sun.jvm.hotspot.gc.g1.HeapRegion; - -public class PrintRegionClosure implements SpaceClosure { - private PrintStream tty; - - public PrintRegionClosure(PrintStream tty) { - this.tty = tty; - } - - public void doSpace(Space hr) { - ((HeapRegion)hr).printOn(tty); - } -} --- /dev/null 2019-11-06 16:17:24.858704995 +0100 +++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/PrintRegionClosure.java 2019-11-06 20:19:19.261211766 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.gc.g1; + +import java.io.PrintStream; +import sun.jvm.hotspot.gc.g1.HeapRegion; + +public class PrintRegionClosure implements HeapRegionClosure { + private PrintStream tty; + + public PrintRegionClosure(PrintStream tty) { + this.tty = tty; + } + + public void doHeapRegion(HeapRegion hr) { + hr.printOn(tty); + } +}
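A note on the spaceDecorator changes that recur throughout this patch: spaceDecorator.hpp stops including space.hpp and mutableSpace.hpp, the manglers' top()/end() accessors lose their inline bodies, and those bodies move to the new spaceDecorator.inline.hpp, which the affected .cpp files now include instead. Presumably this breaks the header dependency so that heapRegion.hpp can keep including spaceDecorator.hpp without dragging in Space. Below is a minimal, self-contained sketch of that forward-declaration / .inline.hpp pattern; Widget and Gadget are hypothetical names, not HotSpot code.

// Conceptually widget.hpp: a forward declaration is all it needs.
class Gadget;

class Widget {                      // stand-in for SpaceMangler and friends
  Gadget* _gadget;
public:
  Widget(Gadget* g) : _gadget(g) {}
  int gadget_size() const;          // declared here, defined out of line below
};

// Conceptually widget.inline.hpp: the only header that must see Gadget's full definition.
class Gadget {                      // stand-in for ContiguousSpace / MutableSpace
public:
  int size() const { return 42; }
};

inline int Widget::gadget_size() const { return _gadget->size(); }

// Only translation units that call gadget_size() include the .inline.hpp;
// everything else keeps the cheaper widget.hpp and the include cycle is broken.
int main() {
  Gadget g;
  Widget w(&g);
  return w.gadget_size() == 42 ? 0 : 1;
}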