--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp	2015-05-27 14:30:30.326483909 -0400
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp	2015-05-27 14:30:28.982407320 -0400
@@ -404,7 +404,7 @@
 // can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
   HeapRegion* hr = heap_region_containing(p);
-  return !hr->is_humongous();
+  return !hr->is_pinned();
 }
 
 // Private methods.
@@ -907,6 +907,213 @@
   return NULL;
 }
 
+void G1CollectedHeap::begin_record_alloc_range() {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  if (_recording_allocator == NULL) {
+    _recording_allocator = G1RecordingAllocator::create_allocator(this);
+  }
+}
+
+bool G1CollectedHeap::is_record_alloc_too_large(size_t word_size) {
+  // Check whether the size would be considered humongous for a minimum-sized region.
+  return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
+}
+
+HeapWord* G1CollectedHeap::record_mem_allocate(size_t word_size) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(_recording_allocator != NULL, "_recording_allocator not initialized");
+
+  // Return NULL if the size would be considered humongous for a minimum-sized region.
+  // Otherwise, attempt to perform the allocation in the recorded space.
+  if (is_record_alloc_too_large(word_size)) {
+    return NULL;
+  }
+  return _recording_allocator->record_mem_allocate(word_size);
+}
+
+void G1CollectedHeap::end_record_alloc_range(GrowableArray<MemRegion>* ranges,
+                                             uint end_alignment) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+  assert(_recording_allocator != NULL, "recording allocator uninitialized");
+
+  // Call complete_recording to do the real work, filling in the MemRegion
+  // array with the recorded regions.
+  _recording_allocator->complete_recording(ranges, end_alignment);
+}
+
+void G1CollectedHeap::fill_with_non_humongous_objects(HeapWord* base_address, size_t word_size) {
+  // Create filler objects for the specified range, being careful not to
+  // create any humongous objects.
+  if (!is_humongous(word_size)) {
+    CollectedHeap::fill_with_object(base_address, word_size);
+  } else {
+    size_t remainder = word_size;
+    size_t increment = humongous_threshold_for(HeapRegion::GrainWords) / 2;
+    HeapWord* fill_top = base_address;
+    // Don't let remainder get smaller than the minimum filler object size.
+    while ((remainder > increment) && (remainder - increment >= min_fill_size())) {
+      CollectedHeap::fill_with_object(fill_top, increment);
+      fill_top += increment;
+      remainder -= increment;
+    }
+    if (remainder != 0) {
+      CollectedHeap::fill_with_object(fill_top, remainder);
+    }
+  }
+}
+
+bool
+G1CollectedHeap::check_archive_addresses(MemRegion* ranges, uint count) {
+  MemRegion mr = _hrm.reserved();
+  for (uint i = 0; i < count; i++) {
+    if (!mr.contains(ranges[i].start()) || !mr.contains(ranges[i].last())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool
+G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, uint count) {
+  MutexLockerEx x(Heap_lock);
+
+  MemRegion mr = _hrm.reserved();
+  HeapWord* prev_end_addr = NULL;
+  uint prev_end_index = 0;
+
+  // Temporarily disable pretouching of heap pages. This interface is used
+  // when mmap'ing archived heap data in, so pre-touching is wasted.
+  FlagSetting fs(AlwaysPreTouch, false);
+
+  // Enable archive object checking in G1MarkSweep. We have to let it know
+  // about each archive range, so that objects in those ranges aren't marked.
+  G1MarkSweep::enable_archive_object_check();
+
+  // For each specified MemRegion range, allocate the corresponding G1
+  // regions and mark them as archive regions.
+  // We expect the ranges in ascending order, without overlap.
+  for (uint i = 0; i < count; i++) {
+    HeapWord* base_address = ranges[i].start();
+    size_t word_size = ranges[i].word_size();
+    HeapWord* end_address = ranges[i].last();
+
+    assert((base_address > prev_end_addr) && (base_address < end_address),
+           "invalid range specification");
+
+    prev_end_addr = end_address;
+    uint start_index = _hrm.addr_to_index(base_address);
+    uint end_index = _hrm.addr_to_index(end_address);
+
+    // Check for ranges that begin/end in the same G1 region
+    // as the previous range.
+    if (start_index == prev_end_index) {
+      if (end_index == prev_end_index) {
+        break;
+      }
+      start_index++;
+    }
+    prev_end_index = end_index;
+
+    // Ensure that each contained G1 region is available and free,
+    // returning false if not.
+    for (uint curr_index = start_index; curr_index <= end_index; curr_index++) {
+      HeapRegion* curr_region;
+      if ((curr_region = _hrm.at_or_null(curr_index)) == NULL) {
+        ergo_verbose1(ErgoHeapSizing,
+                      "attempt heap expansion",
+                      ergo_format_reason("pinning region")
+                      ergo_format_byte("region size"),
+                      HeapRegion::GrainWords * HeapWordSize);
+        _hrm.expand_at(curr_index, 1);
+      } else {
+        if (!curr_region->is_free()) {
+          return false;
+        }
+      }
+    }
+
+    _hrm.allocate_free_regions_starting_at(start_index, (end_index - start_index) + 1);
+    _allocator->increase_used(word_size * HeapWordSize);
+
+    // Mark each G1 region touched by the range as archive, add it to the old set, and set
+    // the allocation context and top.
+    for (uint i = start_index; i <= end_index; i++) {
+      HeapRegion* curr = region_at(i);
+      assert(curr->is_empty() && !curr->is_pinned(), "Invalid MemRegion");
+      _hr_printer.alloc(curr, G1HRPrinter::Archive);
+      curr->set_allocation_context(AllocationContext::system());
+      if (i != end_index) {
+        curr->set_top(curr->end());
+      } else {
+        curr->set_top(end_address + 1);
+      }
+      curr->set_archive();
+      _old_set.add(curr);
+    }
+
+    // Notify mark-sweep of the archive range.
+    G1MarkSweep::mark_range_archive(base_address, end_address);
+  }
+  return true;
+}
+
+void
+G1CollectedHeap::fill_archive_regions(MemRegion* ranges, uint count) {
+
+  MemRegion mr = _hrm.reserved();
+  HeapWord *prev_end_addr = NULL;
+  uint prev_end_index = 0;
+
+  // For each MemRegion, create filler objects, if needed, in the G1 regions
+  // that contain the address range. The address range actually within the
+  // MemRegion will not be modified. That is assumed to have been initialized
+  // elsewhere, probably via an mmap of archived heap data.
+  MutexLockerEx x(Heap_lock);
+  for (uint i = 0; i < count; i++) {
+    HeapWord* base_address = ranges[i].start();
+    size_t word_size = ranges[i].word_size();
+    HeapWord* end_address = ranges[i].last();
+
+    assert(mr.contains(base_address) && mr.contains(end_address),
+           "MemRegion outside of heap");
+
+    uint start_index = _hrm.addr_to_index(base_address);
+    uint end_index = _hrm.addr_to_index(end_address);
+    HeapRegion* start_region = _hrm.addr_to_region(base_address);
+    HeapRegion* end_region = _hrm.addr_to_region(end_address);
+    HeapWord* bottom_address = start_region->bottom();
+
+    // Check for a range beginning in the same region in which the
+    // previous one ended.
+    if (start_index == prev_end_index) {
+      bottom_address = prev_end_addr;
+      start_index++;
+    }
+
+#ifdef ASSERT
+    // Verify the regions were all marked as archive regions by
+    // alloc_archive_regions.
+    for (uint i = start_index; i <= end_index; i++) {
+      HeapRegion* curr = region_at(i);
+      assert(curr->is_archive(), "Invalid range in fill_archive_regions");
+    }
+#endif
+
+    prev_end_addr = base_address + word_size;
+    prev_end_index = end_index;
+
+    // Fill the low part of the first allocated region with dummy object(s),
+    // if the region base does not match the range start address, or if the
+    // previous range ended within the same G1 region and left a gap.
+    if (base_address != bottom_address) {
+      size_t fill_size = base_address - bottom_address;
+      G1CollectedHeap::fill_with_non_humongous_objects(bottom_address, fill_size);
+      _allocator->increase_used(fill_size * HeapWordSize);
+    }
+  }
+}
+
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
@@ -1131,6 +1338,8 @@
       }
     } else if (hr->is_continues_humongous()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
+    } else if (hr->is_archive()) {
+      _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
     } else if (hr->is_old()) {
       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
     } else {
@@ -1724,6 +1933,7 @@
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
+  _recording_allocator(NULL),
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
@@ -1750,7 +1960,7 @@
   _workers->initialize_workers();
 
   _allocator = G1Allocator::create_allocator(this);
-  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
+  _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
 
   int n_queues = (int)ParallelGCThreads;
   _task_queues = new RefToScanQueueSet(n_queues);
@@ -2165,7 +2375,11 @@
 
 // Computes the sum of the storage used by the various regions.
 size_t G1CollectedHeap::used() const {
-  return _allocator->used();
+  size_t result = _allocator->used();
+  if (_recording_allocator != NULL) {
+    result += _recording_allocator->used();
+  }
+  return result;
 }
 
 size_t G1CollectedHeap::used_unlocked() const {
@@ -2593,7 +2807,7 @@
 
 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
   HeapRegion* result = _hrm.next_region_in_heap(from);
-  while (result != NULL && result->is_humongous()) {
+  while (result != NULL && result->is_pinned()) {
     result = _hrm.next_region_in_heap(result);
   }
   return result;
@@ -2901,6 +3115,32 @@
   size_t live_bytes() { return _live_bytes; }
 };
 
+
+class VerifyArchiveOopClosure: public OopClosure {
+public:
+  VerifyArchiveOopClosure(HeapRegion *hr) { }
+  void do_oop(narrowOop *p) { do_oop_work(p); }
+  void do_oop(      oop *p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T *p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
+              "Archive object references a non-pinned object");
+  }
+};
+
+class VerifyArchiveRegionClosure: public ObjectClosure {
+public:
+  VerifyArchiveRegionClosure(HeapRegion *hr) { }
+  // Verify that all object pointers are to pinned regions.
+  void do_object(oop o) {
+    VerifyArchiveOopClosure checkOop(NULL);
+    assert(o != NULL, "Should not be here for NULL oops");
+    o->oop_iterate_no_header(&checkOop);
+  }
+};
+
+
 class VerifyRegionClosure: public HeapRegionClosure {
 private:
   bool _par;
@@ -2920,6 +3160,13 @@
   }
 
   bool doHeapRegion(HeapRegion* r) {
+    // For archive regions, verify there are no heap pointers to
+    // non-pinned regions. For all others, verify liveness info.
+    if (r->is_archive()) {
+      VerifyArchiveRegionClosure verify_oop_pointers(r);
+      r->object_iterate(&verify_oop_pointers);
+      return true;
+    }
     if (!r->is_continues_humongous()) {
       bool failures = false;
       r->verify(_vo, &failures);
@@ -3104,7 +3351,7 @@
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
  default:                            ShouldNotReachHere();
  }
  return false; // keep some compilers happy
@@ -3115,7 +3362,10 @@
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
+  case VerifyOption_G1UseMarkWord: {
+    HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
+    return !obj->is_gc_marked() && !hr->is_archive();
+  }
  default:                            ShouldNotReachHere();
  }
  return false; // keep some compilers happy
@@ -3148,7 +3398,7 @@
   st->cr();
   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
                "HS=humongous(starts), HC=humongous(continues), "
-               "CS=collection set, F=free, TS=gc time stamp, "
+               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
                "PTAMS=previous top-at-mark-start, "
                "NTAMS=next top-at-mark-start)");
   PrintRegionClosure blk(st);
@@ -3847,6 +4097,9 @@
 
   if (evacuation_failed()) {
     _allocator->set_used(recalculate_used());
+    if (_recording_allocator != NULL) {
+      _recording_allocator->clear_used();
+    }
     for (uint i = 0; i < ParallelGCThreads; i++) {
       if (_evacuation_failed_info_array[i].has_failed()) {
         _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
@@ -6110,7 +6363,8 @@
       // We ignore young regions, we'll empty the young list afterwards.
       // We ignore humongous regions, we're not tearing down the
       // humongous regions set.
-      assert(r->is_free() || r->is_young() || r->is_humongous(),
+      // We ignore archive regions.
+      assert(r->is_free() || r->is_young() || r->is_humongous() || r->is_archive(),
             "it cannot be another type");
     }
     return false;
@@ -6168,13 +6422,19 @@
     assert(!r->is_young(), "we should not come across young regions");
 
     if (r->is_humongous()) {
-      // We ignore humongous regions, we left the humongous set unchanged
+      // We ignore humongous regions.
+      // We left the humongous set unchanged.
     } else {
       // Objects that were compacted would have ended up on regions
-      // that were previously old or free.
+      // that were previously old or free. Archive regions (which are
+      // old) will not have been touched.
       assert(r->is_free() || r->is_old(), "invariant");
-      // We now consider them old, so register as such.
-      r->set_old();
+      // We now consider them old, so register as such. Leave
+      // archive regions set that way, however, while still adding
+      // them to the old set.
+      if (!r->is_archive()) {
+        r->set_old();
+      }
       _old_set->add(r);
     }
     _total_used += r->used();
@@ -6200,6 +6460,9 @@
 
   if (!free_list_only) {
     _allocator->set_used(cl.total_used());
+    if (_recording_allocator != NULL) {
+      _recording_allocator->clear_used();
+    }
   }
   assert(_allocator->used_unlocked() == recalculate_used(),
          err_msg("inconsistent _allocator->used_unlocked(), "
@@ -6300,6 +6563,25 @@
   _hr_printer.retire(alloc_region);
 }
 
+HeapRegion* G1CollectedHeap::alloc_highest_available_region() {
+  bool expanded = false;
+  uint index = _hrm.find_highest_available(&expanded);
+
+  if (index != G1_NO_HRM_INDEX) {
+    if (expanded) {
+      ergo_verbose1(ErgoHeapSizing,
+                    "attempt heap expansion",
+                    ergo_format_reason("requested address range outside heap bounds")
+                    ergo_format_byte("region size"),
+                    HeapRegion::GrainWords * HeapWordSize);
+    }
+    _hrm.allocate_free_regions_starting_at(index, 1);
+    return region_at(index);
+  }
+  return NULL;
+}
+
+
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
@@ -6336,6 +6618,7 @@
       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
       _old_count.increment(1u, hr->capacity());
     } else {
+      assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
      ShouldNotReachHere();
    }
    return false;
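
For orientation, here is a minimal caller-side sketch of the two allocation paths this patch adds: the record/complete path used while dumping archived heap data, and the check/alloc/fill path used when mapping it back in. It is illustrative only and not part of the patch; the wrapper names, the word_size, and the end_alignment value are assumptions, and the calls are expected to run under the conditions the code above asserts (recording on the VM thread at a safepoint, archived data mmap'ed before fill_archive_regions).

// Illustrative sketch only -- not part of this patch. Wrapper names and
// constants are hypothetical; only the G1CollectedHeap calls are from the patch.

// Dump time (VM thread, at a safepoint): record allocations into archive space.
static void archive_dump_sketch(G1CollectedHeap* g1h, GrowableArray<MemRegion>* ranges) {
  g1h->begin_record_alloc_range();
  size_t word_size = 64;                              // hypothetical object size in words
  if (!g1h->is_record_alloc_too_large(word_size)) {
    HeapWord* p = g1h->record_mem_allocate(word_size);
    // ... initialize the archived object at p ...
  }
  g1h->end_record_alloc_range(ranges, 0 /* end_alignment, illustrative value */);
}

// Restore time: claim the archived ranges, map the data, then back-fill gaps.
static bool archive_restore_sketch(G1CollectedHeap* g1h, MemRegion* ranges, uint count) {
  if (!g1h->check_archive_addresses(ranges, count)) {
    return false;                                     // a range lies outside the reserved heap
  }
  if (!g1h->alloc_archive_regions(ranges, count)) {
    return false;                                     // a covered region was not free
  }
  // ... mmap archived heap data into the ranges here ...
  g1h->fill_archive_regions(ranges, count);           // fill region space outside the ranges with dummies
  return true;
}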