--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp	2015-06-11 15:12:55.012511714 -0400
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp	2015-06-11 15:12:53.672435068 -0400
@@ -915,16 +915,15 @@
 }
 
 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
-  // Check whether the size would be considered humongous for a minimum-sized region.
+  // Allocations in archive regions cannot be of a size that would be considered
+  // humongous even for a minimum-sized region, because G1 region sizes/boundaries
+  // may be different at archive-restore time.
   return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
 }
 
 HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
   assert_at_safepoint(true /* should_be_vm_thread */);
   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
-
-  // Return NULL if the size would be considered humongous for a minimum-sized region.
-  // Otherwise, attempt to perform the allocation in the archive space.
   if (is_archive_alloc_too_large(word_size)) {
     return NULL;
   }
@@ -932,186 +931,183 @@
 }
 
 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
-                                              uint end_alignment) {
+                                              size_t end_alignment_in_bytes) {
   assert_at_safepoint(true /* should_be_vm_thread */);
   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
 
   // Call complete_archive to do the real work, filling in the MemRegion
   // array with the archive regions.
-  _archive_allocator->complete_archive(ranges, end_alignment);
-  _archive_allocator->~G1ArchiveAllocator();
+  _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
+  delete _archive_allocator;
   _archive_allocator = NULL;
 }
-
-void G1CollectedHeap::fill_with_non_humongous_objects(HeapWord* base_address, size_t word_size) {
-  // Create filler objects for the specified range, being careful not to
-  // create any humongous objects.
-  if (!is_humongous(word_size)) {
-    CollectedHeap::fill_with_object(base_address, word_size);
-  } else {
-    size_t remainder = word_size;
-    size_t increment = humongous_threshold_for(HeapRegion::GrainWords) / 2;
-    HeapWord* fill_top = base_address;
-    // Don't let remainder get smaller than the minimum filler object size.
-    while ((remainder > increment) && (remainder - increment >= min_fill_size())) {
-      CollectedHeap::fill_with_object(fill_top, increment);
-      fill_top += increment;
-      remainder -= increment;
-    }
-    if (remainder != 0) {
-      CollectedHeap::fill_with_object(fill_top, remainder);
-    }
-  }
-}
-
-bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, uint count) {
-  MemRegion mr = _hrm.reserved();
-  for (uint i = 0; i < count; i++) {
-    if (!mr.contains(ranges[i].start()) || !mr.contains(ranges[i].last())) {
+
+bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  for (size_t i = 0; i < count; i++) {
+    if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
       return false;
     }
  }
   return true;
 }
 
-bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, uint count) {
+bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
-  MemRegion mr = _hrm.reserved();
-  HeapWord* prev_end_addr = NULL;
-  uint prev_end_index = 0;
+  MemRegion reserved = _hrm.reserved();
+  HeapWord* prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
 
-  // Temporarily disable pretouching of heap pages.  This interface is used
+  // Temporarily disable pretouching of heap pages. This interface is used
   // when mmap'ing archived heap data in, so pre-touching is wasted.
   FlagSetting fs(AlwaysPreTouch, false);
 
-  // Enable archive object checking in G1MarkSweep.  We have to let it know
+  // Enable archive object checking in G1MarkSweep. We have to let it know
   // about each archive range, so that objects in those ranges aren't marked.
   G1MarkSweep::enable_archive_object_check();
 
   // For each specified MemRegion range, allocate the corresponding G1
-  // regions and mark them as archive regions.
-  // We expect the ranges in ascending order, without overlap.
-  for (uint i = 0; i < count; i++) {
-    HeapWord* base_address = ranges[i].start();
-    size_t word_size = ranges[i].word_size();
-    HeapWord* end_address = ranges[i].last();
-
-    assert((base_address > prev_end_addr) && (base_address < end_address),
-           "invalid range specification");
-
-    prev_end_addr = end_address;
-    uint start_index = _hrm.addr_to_index(base_address);
-    uint end_index = _hrm.addr_to_index(end_address);
-
-    // Check for ranges that begin/end in the same G1 region
-    // as as the previous range.
-    if (start_index == prev_end_index) {
-      if (end_index == prev_end_index) {
-        break;
-      }
-      start_index++;
-    }
-    prev_end_index = end_index;
-
-    // Ensure that each contained G1 region is available and free,
-    // returning false if not.
-    for (uint curr_index = start_index; curr_index <= end_index; curr_index++) {
-      HeapRegion* curr_region;
-      if ((curr_region = _hrm.at_or_null(curr_index)) == NULL) {
-        ergo_verbose1(ErgoHeapSizing,
-                      "attempt heap expansion",
-                      ergo_format_reason("pinning region")
-                      ergo_format_byte("region size"),
-                      HeapRegion::GrainWords * HeapWordSize);
-        _hrm.expand_at(curr_index, 1);
-      } else {
-        if (!curr_region->is_free()) {
-          return false;
-        }
-      }
+  // regions and mark them as archive regions. We expect the ranges in
+  // ascending starting address order, without overlap.
+  for (size_t i = 0; i < count; i++) {
+    MemRegion curr_range = ranges[i];
+    HeapWord* start_address = curr_range.start();
+    size_t word_size = curr_range.word_size();
+    HeapWord* last_address = curr_range.last();
+    size_t commits = 0;
+
+    guarantee(reserved.contains(start_address) && reserved.contains(last_address),
+              err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                      p2i(start_address), p2i(last_address)));
+    guarantee(start_address > prev_last_addr,
+              err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                      p2i(start_address), p2i(prev_last_addr)));
+    prev_last_addr = last_address;
+
+    // Check for ranges that start in the same G1 region in which the previous
+    // range ended, and adjust the start address so we don't try to allocate
+    // the same region again. If the current range is entirely within that
+    // region, skip it, just adjusting the recorded top.
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
+      start_address = start_region->end();
+      if (start_address > last_address) {
+        _allocator->increase_used(word_size * HeapWordSize);
+        start_region->set_top(last_address + 1);
+        continue;
+      }
+      start_region->set_top(start_address);
+      curr_range = MemRegion(start_address, last_address + 1);
+      start_region = _hrm.addr_to_region(start_address);
+    }
+
+    // Perform the actual region allocation, exiting if it fails.
+    // Then note how much new space we have allocated.
+    if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
+      return false;
     }
-
-    _hrm.allocate_free_regions_starting_at(start_index, (end_index - start_index) + 1);
     _allocator->increase_used(word_size * HeapWordSize);
-
-    // Mark each G1 region touched by the range as archive, add it to the old set, and set
-    // the allocation context and top.
-    for (uint i = start_index; i <= end_index; i++) {
-      HeapRegion* curr = region_at(i);
-      assert(curr->is_empty() && !curr->is_pinned(), "Invalid MemRegion");
-      _hr_printer.alloc(curr, G1HRPrinter::Archive);
-      curr->set_allocation_context(AllocationContext::system());
-      if (i != end_index) {
-        curr->set_top(curr->end());
+    if (commits != 0) {
+      ergo_verbose1(ErgoHeapSizing,
+                    "attempt heap expansion",
+                    ergo_format_reason("allocate archive regions")
+                    ergo_format_byte("total size"),
+                    HeapRegion::GrainWords * HeapWordSize * commits);
+    }
+
+    // Mark each G1 region touched by the range as archive, add it to the old set,
+    // and set the allocation context and top.
+    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    prev_last_region = last_region;
+
+    while (curr_region != NULL) {
+      assert(curr_region->is_empty() && !curr_region->is_pinned(),
+             err_msg("Region already in use (index %u)", curr_region->hrm_index()));
+      _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
+      curr_region->set_allocation_context(AllocationContext::system());
+      curr_region->set_archive();
+      _old_set.add(curr_region);
+      if (curr_region != last_region) {
+        curr_region->set_top(curr_region->end());
+        curr_region = _hrm.next_region_in_heap(curr_region);
       } else {
-        curr->set_top(end_address + 1);
+        curr_region->set_top(last_address + 1);
+        curr_region = NULL;
       }
-      curr->set_archive();
-      _old_set.add(curr);
     }
 
     // Notify mark-sweep of the archive range.
-    G1MarkSweep::mark_range_archive(base_address, end_address);
+    G1MarkSweep::mark_range_archive(curr_range);
   }
   return true;
 }
 
-void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, uint count) {
-  MemRegion mr = _hrm.reserved();
-  HeapWord *prev_end_addr = NULL;
-  uint prev_end_index = 0;
+void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  HeapWord *prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
 
   // For each MemRegion, create filler objects, if needed, in the G1 regions
-  // that contain the address range.  The address range actually within the
-  // MemRegion will not be modified.  That is assumed to have been initialized
+  // that contain the address range. The address range actually within the
+  // MemRegion will not be modified. That is assumed to have been initialized
   // elsewhere, probably via an mmap of archived heap data.
   MutexLockerEx x(Heap_lock);
-  for (uint i = 0; i < count; i++) {
-    HeapWord* base_address = ranges[i].start();
-    size_t word_size = ranges[i].word_size();
-    HeapWord* end_address = ranges[i].last();
-
-    assert(mr.contains(base_address) && mr.contains(end_address),
-           "MemRegion outside of heap");
-
-    uint start_index = _hrm.addr_to_index(base_address);
-    uint end_index = _hrm.addr_to_index(end_address);
-    HeapRegion* start_region = _hrm.addr_to_region(base_address);
-    HeapRegion* end_region = _hrm.addr_to_region(end_address);
+  for (size_t i = 0; i < count; i++) {
+    HeapWord* start_address = ranges[i].start();
+    HeapWord* last_address = ranges[i].last();
+
+    assert(reserved.contains(start_address) && reserved.contains(last_address),
+           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                   p2i(start_address), p2i(last_address)));
+    assert(start_address > prev_last_addr,
+           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
+                   p2i(start_address), p2i(prev_last_addr)));
+
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
     HeapWord* bottom_address = start_region->bottom();
 
     // Check for a range beginning in the same region in which the
     // previous one ended.
-    if (start_index == prev_end_index) {
-      bottom_address = prev_end_addr;
-      start_index++;
+    if (start_region == prev_last_region) {
+      bottom_address = prev_last_addr + 1;
     }
 
-#ifdef ASSERT
-    // Verify the regions were all marked as archive regions by
-    // alloc_fixed_ranges.
-    for (uint i = start_index; i <= end_index; i++) {
-      HeapRegion* curr = region_at(i);
-      assert(curr->is_archive(), "Invalid range in fill_archive_regions");
+    // Verify that the regions were all marked as archive regions by
+    // alloc_archive_regions.
+    HeapRegion* curr_region = start_region;
+    while (curr_region != NULL) {
+      guarantee(curr_region->is_archive(),
+                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+      if (curr_region != last_region) {
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region = NULL;
+      }
     }
-#endif
 
-    prev_end_addr = base_address + word_size;
-    prev_end_index = end_index;
+    prev_last_addr = last_address;
+    prev_last_region = last_region;
 
-    // Fill the low part of the first allocated region with dummy object(s),
-    // if the region base does not match the range address, or if the previous
+    // Fill the memory below the allocated range with dummy object(s),
+    // if the region bottom does not match the range start, or if the previous
     // range ended within the same G1 region, and there is a gap.
-    if (base_address != bottom_address) {
-      size_t fill_size = base_address - bottom_address;
-      G1CollectedHeap::fill_with_non_humongous_objects(bottom_address, fill_size);
+    if (start_address != bottom_address) {
+      size_t fill_size = pointer_delta(start_address, bottom_address);
+      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
       _allocator->increase_used(fill_size * HeapWordSize);
     }
   }
 }
 
-
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
@@ -1960,6 +1956,10 @@
   _allocator = G1Allocator::create_allocator(this);
   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
 
+  // Override the default _filler_array_max_size so that no humongous filler
+  // objects are created.
+  _filler_array_max_size = _humongous_object_threshold_in_words;
+
   int n_queues = (int)ParallelGCThreads;
   _task_queues = new RefToScanQueueSet(n_queues);
 
@@ -3114,7 +3114,6 @@
   size_t live_bytes() { return _live_bytes; }
 };
 
-
 class VerifyArchiveOopClosure: public OopClosure {
 public:
   VerifyArchiveOopClosure(HeapRegion *hr) { }
@@ -3124,14 +3123,15 @@
   template <class T> void do_oop_work(T *p) {
     oop obj = oopDesc::load_decode_heap_oop(p);
     guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
-              "Archive object references a non-pinned object");
+              err_msg("Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
+                      p2i(p), p2i(obj)));
   }
 };
 
 class VerifyArchiveRegionClosure: public ObjectClosure {
 public:
   VerifyArchiveRegionClosure(HeapRegion *hr) { }
-  // Verify that all object pointers are to pinned regions.
+  // Verify that all object pointers are to archive regions.
   void do_object(oop o) {
     VerifyArchiveOopClosure checkOop(NULL);
     assert(o != NULL, "Should not be here for NULL oops");
@@ -3139,7 +3139,6 @@
   }
 };
 
-
 class VerifyRegionClosure: public HeapRegionClosure {
 private:
   bool _par;
@@ -3364,7 +3363,7 @@
     case VerifyOption_G1UseMarkWord: {
       HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
       return !obj->is_gc_marked() && !hr->is_archive();
-    } 
+    }
     default: ShouldNotReachHere();
   }
   return false; // keep some compilers happy
@@ -6362,8 +6361,7 @@
       // We ignore young regions, we'll empty the young list afterwards.
       // We ignore humongous regions, we're not tearing down the
       // humongous regions set.
-      // We ignore archive regions.
-      assert(r->is_free() || r->is_young() || r->is_humongous() || r->is_archive(),
+      assert(r->is_free() || r->is_young() || r->is_humongous(),
              "it cannot be another type");
     }
     return false;
@@ -6421,7 +6419,7 @@
     assert(!r->is_young(), "we should not come across young regions");
 
     if (r->is_humongous()) {
-      // We ignore humongous regions, we left the humongous set unchanged
+      // We ignore humongous regions. We left the humongous set unchanged.
     } else {
       // Objects that were compacted would have ended up on regions
      // that were previously old or free. Archive regions (which are
@@ -6561,9 +6559,9 @@
   _hr_printer.retire(alloc_region);
 }
 
-HeapRegion* G1CollectedHeap::alloc_highest_available_region() {
+HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
   bool expanded = false;
-  uint index = _hrm.find_highest_available(&expanded);
+  uint index = _hrm.find_highest_free(&expanded);
 
   if (index != G1_NO_HRM_INDEX) {
     if (expanded) {
@@ -6616,6 +6614,8 @@
       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
       _old_count.increment(1u, hr->capacity());
     } else {
+      // There are no other valid region types. Check for the one invalid
+      // state we can identify: pinned, but neither old nor humongous.
      assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
      ShouldNotReachHere();
    }
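
Note on the end_archive_alloc_range() hunk near the top of this patch: replacing the
explicit destructor call with delete is a correctness fix, not just style. An explicit
obj->~T() runs the destructor but never returns the storage to the allocator, so the
old code leaked the G1ArchiveAllocator instance. A minimal standalone C++ illustration
(not HotSpot code; HotSpot's G1ArchiveAllocator is a CHeapObj with its own operator
delete, while this sketch uses plain global new/delete):

    #include <cstdio>

    struct Tracker {
      Tracker()  { puts("construct"); }
      ~Tracker() { puts("destruct"); }
    };

    int main() {
      Tracker* t = new Tracker();
      // Old pattern: t->~Tracker();  // runs the destructor, but the heap
      //                              // block itself is never freed (a leak)
      delete t;  // new pattern: the destructor runs *and* the storage is released
      return 0;
    }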
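
Why is_archive_alloc_too_large() checks against the *minimum* region size: G1 picks
its region size at startup, so heap data archived with, say, 32M regions may later be
mapped into a heap running with 1M regions, and an archived object must stay
non-humongous for every legal region size. A standalone sketch of that check (the
constants and k-prefixed names are illustrative assumptions, not HotSpot identifiers):

    #include <cstddef>
    #include <cstdio>

    static const size_t kHeapWordSize  = 8;                  // 64-bit HeapWord
    static const size_t kMinRegionBytes = 1 * 1024 * 1024;   // G1's 1M minimum
    static const size_t kMinRegionWords = kMinRegionBytes / kHeapWordSize;

    // Mirrors HeapRegion::humongous_threshold_for(): at least half a region.
    static size_t humongous_threshold_for(size_t region_words) {
      return region_words / 2;
    }

    static bool is_archive_alloc_too_large(size_t word_size) {
      return word_size >= humongous_threshold_for(kMinRegionWords);
    }

    int main() {
      // A 96K-word object is not humongous in a 32M region, but it would be
      // humongous in a 1M region, so the archive allocator must reject it.
      printf("threshold = %zu words\n", humongous_threshold_for(kMinRegionWords));
      printf("96K words too large? %d\n", (int)is_archive_alloc_too_large(96 * 1024));
      return 0;
    }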
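
The subtlest part of the new alloc_archive_regions() is the handling of consecutive
ranges that share a G1 region: when range i+1 starts in the region where range i
ended, the start address is bumped to that region's end so the region is not
allocated twice (and if the whole range fits inside that region, only top is
adjusted). A standalone sketch of just the address arithmetic, with made-up
addresses and an assumed 1M region size (this omits the set_top bookkeeping):

    #include <cstdint>
    #include <cstdio>

    static const uintptr_t kRegionBytes = 1u << 20;  // assumed 1M regions
    static uintptr_t region_base(uintptr_t a) { return a & ~(kRegionBytes - 1); }
    static uintptr_t region_end(uintptr_t a)  { return region_base(a) + kRegionBytes; }

    int main() {
      // Range A: [0x100000, 0x2ff000), Range B: [0x2ff800, 0x500000)
      uintptr_t a_last  = 0x2ff000 - 1;
      uintptr_t b_start = 0x2ff800, b_last = 0x500000 - 1;

      if (region_base(b_start) == region_base(a_last)) {
        // Same region: the region [0x200000, 0x300000) was already allocated
        // for A, so B's region allocation starts at the next region boundary.
        b_start = region_end(a_last);   // 0x300000
      }
      printf("allocate regions for B from %#lx to %#lx\n",
             (unsigned long)b_start, (unsigned long)b_last);
      return 0;
    }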
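
Finally, the new line _filler_array_max_size = _humongous_object_threshold_in_words
is what lets fill_with_non_humongous_objects() be deleted: once the cap is lowered,
CollectedHeap::fill_with_objects() itself splits a large gap into several filler
objects, none reaching humongous size. A rough standalone sketch of that splitting
(the real HotSpot loop differs in detail; kMinFillWords and kFillerMaxWords are
stand-ins for min_fill_size() and the new cap):

    #include <cstddef>
    #include <cstdio>

    static const size_t kMinFillWords  = 2;      // stand-in for min_fill_size()
    static const size_t kFillerMaxWords = 65536; // stand-in for the lowered cap

    static void fill_one(size_t words) {         // stand-in for one filler array
      printf("filler of %zu words\n", words);
    }

    // Emit cap-sized fillers, keeping the tail at least the minimum fill size.
    static void fill_with_objects(size_t words) {
      while (words > kFillerMaxWords) {
        size_t chunk = kFillerMaxWords;
        if (words - chunk < kMinFillWords) {
          chunk -= kMinFillWords;  // leave room for a minimal trailing filler
        }
        fill_one(chunk);
        words -= chunk;
      }
      fill_one(words);
    }

    int main() {
      fill_with_objects(200000);  // 200000 = 65536 + 65536 + 65536 + 3392
      return 0;
    }

With the cap in place, the gap-filling call site in fill_archive_regions() can simply
use the generic fill_with_objects(), which is exactly what the patch does.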