diff -ur /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp --- /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp 2015-06-10 21:31:52.933471744 -0400 +++ /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/collectionSetChooser.cpp 2015-06-10 10:37:04.980351153 -0400 @@ -107,7 +107,8 @@ HeapRegion *curr = regions_at(index++); guarantee(curr != NULL, "Regions in _regions array cannot be NULL"); guarantee(!curr->is_young(), "should not be young!"); - guarantee(!curr->is_pinned(), "should not be pinned!"); + guarantee(!curr->is_pinned(), + err_msg("Pinned region should not be in collection set (index %u)", curr->hrm_index())); if (prev != NULL) { guarantee(order_regions(prev, curr) != 1, err_msg("GC eff prev: %1.4f GC eff curr: %1.4f", @@ -150,7 +151,7 @@ void CollectionSetChooser::add_region(HeapRegion* hr) { assert(!hr->is_pinned(), - "Pinned regions shouldn't be added to the collection set"); + err_msg("Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index())); assert(!hr->is_young(), "should not be young!"); _regions.append(hr); _length++; diff -ur /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp --- /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp 2015-06-10 21:31:52.937471972 -0400 +++ /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/collectionSetChooser.hpp 2015-06-10 10:32:51.533857915 -0400 @@ -103,9 +103,8 @@ void sort_regions(); // Determine whether to add the given region to the CSet chooser or - // not. 
Currently, we skip humongous and archive regions, which are both - // "pinned," and regions whose live bytes are over the threshold. Humongous - // regions may be reclaimed during cleanup. + // not. Currently, we skip pinned regions and regions whose live + // bytes are over the threshold. Humongous regions may be reclaimed during cleanup. bool should_add(HeapRegion* hr) { assert(hr->is_marked(), "pre-condition"); assert(!hr->is_young(), "should never consider young regions"); diff -ur /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1Allocator.cpp /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1Allocator.cpp --- /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1Allocator.cpp 2015-06-10 21:31:52.937471972 -0400 +++ /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1Allocator.cpp 2015-06-10 15:59:52.283983376 -0400 @@ -46,7 +46,7 @@ HeapRegion* retained_region = *retained_old; *retained_old = NULL; assert(retained_region == NULL || !retained_region->is_archive(), - "Archive region should not be alloc region"); + err_msg("Archive region should not be alloc region (index %u)", retained_region->hrm_index())); // We will discard the current GC alloc region if: // a) it's in the collection set (it can happen!), @@ -172,7 +172,6 @@ } } - G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h) { // Create the archive allocator, and also enable archive object checking // in mark-sweep, since we will be creating archive regions. 
@@ -189,7 +188,7 @@ if (hr == NULL) { return false; } - assert(hr->is_empty(), "expected empty region"); + assert(hr->is_empty(), err_msg("expected empty region (index %u)", hr->hrm_index())); hr->set_archive(); _g1h->_old_set.add(hr); _g1h->_hr_printer.alloc(hr, G1HRPrinter::Archive); @@ -210,6 +209,7 @@ } HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) { + assert(word_size != 0, "size must not be zero"); if (_allocation_region == NULL) { if (!alloc_new_region()) { return NULL; @@ -296,9 +296,11 @@ // Loop through the allocated regions, and create MemRegions summarizing // the allocated address range, combining contiguous ranges. Add the - // MemRegions to the growable array provided by the caller. + // MemRegions to the GrowableArray provided by the caller. int index = _allocated_regions.length() - 1; - assert(_allocated_regions.at(index) == _allocation_region, "expect current region at end of array"); + assert(_allocated_regions.at(index) == _allocation_region, + err_msg("expected region %u at end of array, found %u", + _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index())); HeapWord* base_address = _allocation_region->bottom(); HeapWord* top = base_address; @@ -314,6 +316,7 @@ index = index - 1; } + assert(top != base_address, err_msg("zero-sized range, address " PTR_FORMAT, p2i(base_address))); ranges->append(MemRegion(base_address, pointer_delta(top, base_address))); _allocated_regions.clear(); _allocation_region = NULL; diff -ur /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1Allocator.hpp /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1Allocator.hpp --- /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1Allocator.hpp 2015-06-10 21:31:52.937471972 -0400 +++ /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1Allocator.hpp 2015-06-10 13:27:01.149479069 -0400 @@ -269,6 +269,10 @@ virtual void waste(size_t& 
wasted, size_t& undo_wasted); }; +// G1ArchiveAllocator is used to allocate memory in archive +// regions. Such regions are not modifiable by GC, being neither +// scavenged nor compacted, or even marked in the object header. +// They can contain no pointers to non-archive heap regions. class G1ArchiveAllocator : public CHeapObj { protected: @@ -304,7 +308,7 @@ _top(NULL), _max(NULL) { } - ~G1ArchiveAllocator() { + virtual ~G1ArchiveAllocator() { assert(_allocation_region == NULL, "_allocation_region not NULL"); } diff -ur /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp --- /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp 2015-06-10 21:31:52.941472201 -0400 +++ /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp 2015-06-10 16:11:00.054275870 -0400 @@ -938,39 +938,13 @@ // Call complete_archive to do the real work, filling in the MemRegion // array with the archive regions. 
_archive_allocator->complete_archive(ranges, end_alignment_in_bytes); - _archive_allocator->~G1ArchiveAllocator(); + delete _archive_allocator; _archive_allocator = NULL; } -void G1CollectedHeap::fill_with_non_humongous_objects(HeapWord* start_address, size_t word_size) { -#ifdef ASSERT - HeapRegion* start_region = _hrm.addr_to_region(start_address); - HeapRegion* last_region = _hrm.addr_to_region(start_address + word_size - 1); - assert(start_region == last_region, err_msg("attempting to fill across region boundary, from " - PTR_FORMAT " to " PTR_FORMAT, - p2i(start_address), p2i(start_address + word_size - 1))); -#endif - - if (!is_humongous(word_size)) { - CollectedHeap::fill_with_object(start_address, word_size); - } else { - size_t remainder = word_size; - size_t increment = humongous_threshold_for(HeapRegion::GrainWords) / 2; - HeapWord* fill_top = start_address; - // Don't let remainder get smaller than the minimum filler object size. - while ((remainder > increment) && (remainder - increment >= min_fill_size())) { - CollectedHeap::fill_with_object(fill_top, increment); - fill_top += increment; - remainder -= increment; - } - if (remainder != 0) { - CollectedHeap::fill_with_object(fill_top, remainder); - } - } -} - bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) { - assert((ranges != NULL) && (count != 0), "No MemRegions provided"); + assert(ranges != NULL, "MemRegion array NULL"); + assert(count != 0, "No MemRegions provided"); MemRegion reserved = _hrm.reserved(); for (size_t i = 0; i < count; i++) { if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) { @@ -981,7 +955,8 @@ } bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) { - assert((ranges != NULL) && (count != 0), "No MemRegions provided"); + assert(ranges != NULL, "MemRegion array NULL"); + assert(count != 0, "No MemRegions provided"); MutexLockerEx x(Heap_lock); MemRegion reserved = _hrm.reserved(); @@ -1004,7 
+979,7 @@ HeapWord* start_address = curr_range.start(); size_t word_size = curr_range.word_size(); HeapWord* last_address = curr_range.last(); - int commits = 0; + size_t commits = 0; guarantee(reserved.contains(start_address) && reserved.contains(last_address), err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]", @@ -1074,7 +1049,8 @@ } void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) { - assert((ranges != NULL) && (count != 0), "No MemRegions provided"); + assert(ranges != NULL, "MemRegion array NULL"); + assert(count != 0, "No MemRegions provided"); MemRegion reserved = _hrm.reserved(); HeapWord *prev_last_addr = NULL; HeapRegion* prev_last_region = NULL; @@ -1126,13 +1102,12 @@ // range ended within the same G1 region, and there is a gap. if (start_address != bottom_address) { size_t fill_size = pointer_delta(start_address, bottom_address); - G1CollectedHeap::fill_with_non_humongous_objects(bottom_address, fill_size); + G1CollectedHeap::fill_with_objects(bottom_address, fill_size); _allocator->increase_used(fill_size * HeapWordSize); } } } - HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size, uint* gc_count_before_ret, uint* gclocker_retry_count_ret) { @@ -1981,6 +1956,10 @@ _allocator = G1Allocator::create_allocator(this); _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords); + // Override the default _filler_array_max_size so that no humongous filler + // objects are created. 
+ _filler_array_max_size = _humongous_object_threshold_in_words; + int n_queues = (int)ParallelGCThreads; _task_queues = new RefToScanQueueSet(n_queues); @@ -3135,7 +3114,6 @@ size_t live_bytes() { return _live_bytes; } }; - class VerifyArchiveOopClosure: public OopClosure { public: VerifyArchiveOopClosure(HeapRegion *hr) { } @@ -3145,7 +3123,8 @@ template void do_oop_work(T *p) { oop obj = oopDesc::load_decode_heap_oop(p); guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj), - "Archive object references a non-archive object"); + err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT, + p2i(p), p2i(obj))); } }; @@ -3160,7 +3139,6 @@ } }; - class VerifyRegionClosure: public HeapRegionClosure { private: bool _par; @@ -6383,8 +6361,7 @@ // We ignore young regions, we'll empty the young list afterwards. // We ignore humongous regions, we're not tearing down the // humongous regions set. - // We ignore archive regions. - assert(r->is_free() || r->is_young() || r->is_humongous() || r->is_archive(), + assert(r->is_free() || r->is_young() || r->is_humongous(), "it cannot be another type"); } return false; @@ -6442,7 +6419,7 @@ assert(!r->is_young(), "we should not come across young regions"); if (r->is_humongous()) { - // We ignore humongous regions, we left the humongous set unchanged + // We ignore humongous regions. We left the humongous set unchanged. } else { // Objects that were compacted would have ended up on regions // that were previously old or free. Archive regions (which are @@ -6637,6 +6614,8 @@ assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index())); _old_count.increment(1u, hr->capacity()); } else { + // There are no other valid region types. Check for one invalid + // one we can identify: pinned without old or humongous set. 
assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index())); ShouldNotReachHere(); } Only in /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1: g1CollectedHeap.cpp.orig diff -ur /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp --- /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp 2015-06-10 21:31:52.941472201 -0400 +++ /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp 2015-06-10 14:44:50.609504421 -0400 @@ -575,6 +575,10 @@ void retire_gc_alloc_region(HeapRegion* alloc_region, size_t allocated_bytes, InCSetState dest); + // Allocate the highest free region in the reserved heap. This will commit + // regions as necessary. + HeapRegion* alloc_highest_free_region(); + // - if explicit_gc is true, the GC is for a System.gc() or a heap // inspection request and should collect the entire heap // - if clear_all_soft_refs is true, all soft references should be @@ -740,7 +744,7 @@ // same fixed addresses in a subsequent JVM invocation. void begin_archive_alloc_range(); - // Check if the requested size wouild be too large for an archive allocation. + // Check if the requested size would be too large for an archive allocation. bool is_archive_alloc_too_large(size_t word_size); // Allocate memory of the requested size from the archive region. This will @@ -753,10 +757,6 @@ void end_archive_alloc_range(GrowableArray* ranges, size_t end_alignment_in_bytes = 0); - // Allocate the highest free region in the reserved heap. This will commit - // regions as necessary. - HeapRegion* alloc_highest_free_region(); - // Facility for allocating a fixed range within the heap and marking // the containing regions as 'archive'. 
For use at JVM init time, when the // caller may mmap archived heap data at the specified range(s). @@ -774,10 +774,6 @@ // alloc_archive_regions, and after class loading has occurred. void fill_archive_regions(MemRegion* range, size_t count); - // Fill the requested range without creating any humongous objects. - // The range cannot cross region boundaries. - void fill_with_non_humongous_objects(HeapWord* start_address, size_t word_size); - protected: // Shrink the garbage-first heap by at most the given size (in bytes!). Only in /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1: g1CollectedHeap.hpp.orig diff -ur /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp --- /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp 2015-06-10 21:31:52.945472432 -0400 +++ /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp 2015-06-10 14:48:58.879750102 -0400 @@ -278,10 +278,8 @@ } hr->reset_during_compaction(); } - } else { - if (!hr->is_pinned()) { - hr->compact(); - } + } else if (!hr->is_pinned()) { + hr->compact(); } return false; } @@ -382,10 +380,8 @@ } else { assert(hr->is_continues_humongous(), "Invalid humongous."); } - } else { - if (!hr->is_pinned()) { - prepare_for_compaction(hr, hr->end()); - } + } else if (!hr->is_pinned()) { + prepare_for_compaction(hr, hr->end()); } return false; } diff -ur /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp --- /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp 2015-06-10 21:31:52.945472432 -0400 +++ /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/heapRegionManager.cpp 2015-06-10 
15:56:45.493268109 -0400 @@ -282,8 +282,8 @@ // Loop downwards from the highest region index, looking for an // entry which is either free or not yet committed. If not yet // committed, expand_at that index. - int curr = max_length() - 1; - while (curr >= 0) { + uint curr = max_length() - 1; + while (true) { HeapRegion *hr = _regions.get_by_index(curr); if (hr == NULL) { uint res = expand_at(curr, 1); @@ -297,15 +297,17 @@ return curr; } } + if (curr == 0) { + return G1_NO_HRM_INDEX; + } curr--; } - return G1_NO_HRM_INDEX; } -bool HeapRegionManager::allocate_containing_regions(MemRegion range, int* commit_count) { - int commits = 0; - uint start_index = _regions.get_index_by_address(range.start()); - uint last_index = _regions.get_index_by_address(range.last()); +bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) { + size_t commits = 0; + uint start_index = (uint)_regions.get_index_by_address(range.start()); + uint last_index = (uint)_regions.get_index_by_address(range.last()); // Ensure that each G1 region in the range is free, returning false if not. // Commit those that are not yet available, and keep count. 
@@ -483,7 +485,7 @@ num_committed++; HeapRegion* hr = _regions.get_by_index(i); guarantee(hr != NULL, err_msg("invariant: i: %u", i)); - guarantee(!prev_committed || hr->bottom() == prev_end || hr->is_archive(), + guarantee(!prev_committed || hr->bottom() == prev_end, err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT, i, HR_FORMAT_PARAMS(hr), p2i(prev_end))); guarantee(hr->hrm_index() == i, diff -ur /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp --- /export/users/trbenson/ArchiveRegionsWebrev.02/hs-gc/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp 2015-06-10 21:31:52.945472432 -0400 +++ /export/users/trbenson/ArchiveRegions/hs-gc/hotspot/src/share/vm/gc/g1/heapRegionManager.hpp 2015-06-10 15:43:43.804374798 -0400 @@ -229,7 +229,7 @@ // Allocate the regions that contain the address range specified, committing the // regions if necessary. Return false if any of the regions is already committed // and not free, and return the number of regions newly committed in commit_count. - bool allocate_containing_regions(MemRegion range, int* commit_count); + bool allocate_containing_regions(MemRegion range, size_t* commit_count); // Apply blk->doHeapRegion() on all committed regions in address order, // terminating the iteration early if doHeapRegion() returns true.