src/share/vm/gc/g1/g1CollectedHeap.cpp

*** 33,46 ****
  #include "gc/g1/concurrentMarkThread.inline.hpp"
  #include "gc/g1/g1Allocator.inline.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
  #include "gc/g1/g1CollectorPolicy.hpp"
  #include "gc/g1/g1CollectorState.hpp"
- #include "gc/g1/g1ErgoVerbose.hpp"
  #include "gc/g1/g1EvacFailure.hpp"
  #include "gc/g1/g1GCPhaseTimes.hpp"
- #include "gc/g1/g1Log.hpp"
  #include "gc/g1/g1MarkSweep.hpp"
  #include "gc/g1/g1OopClosures.inline.hpp"
  #include "gc/g1/g1ParScanThreadState.inline.hpp"
  #include "gc/g1/g1RegionToSpaceMapper.hpp"
  #include "gc/g1/g1RemSet.inline.hpp"
--- 33,44 ----
*** 61,70 ****
--- 59,69 ----
  #include "gc/shared/gcTraceTime.hpp"
  #include "gc/shared/generationSpec.hpp"
  #include "gc/shared/isGCActiveMark.hpp"
  #include "gc/shared/referenceProcessor.hpp"
  #include "gc/shared/taskqueue.inline.hpp"
+ #include "logging/log.hpp"
  #include "memory/allocation.hpp"
  #include "memory/iterator.hpp"
  #include "oops/oop.inline.hpp"
  #include "runtime/atomic.inline.hpp"
  #include "runtime/init.hpp"
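Everything below is a mechanical move from gclog_or_tty and ad-hoc verbosity flags (G1Log, G1ErgoVerbose, G1ConcRegionFreeingVerbose, and friends) to the unified logging macros declared in the newly included logging/log.hpp. As orientation, here is a minimal sketch of the three calling shapes the hunks rely on; the wrapper function is hypothetical and only the macro forms are taken from this change:

    #include "logging/log.hpp"

    static void unified_logging_examples(HeapWord* addr, size_t sz) {
      // Leveled, printf-style logging against a tag set:
      log_debug(gc, ergo, heap)("Attempt heap expansion. Allocation request: " SIZE_FORMAT "B", sz);

      // Guard expensive work on whether a level/tag combination is enabled:
      if (log_is_enabled(Debug, gc, humongous)) {
        // ... only gather statistics when someone is listening ...
      }

      // A LogHandle pins one tag set so several related lines, or a whole
      // outputStream obtained via log.info_stream(), can share it:
      LogHandle(gc, verify) log;
      log.info("Example location: " PTR_FORMAT, p2i(addr));
    }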
*** 202,242 ****
  HeapRegion* G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    while (!_secondary_free_list.is_empty() || free_regions_coming()) {
      if (!_secondary_free_list.is_empty()) {
!       if (G1ConcRegionFreeingVerbose) {
!         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                 "secondary_free_list has %u entries",
                                 _secondary_free_list.length());
-       }
        // It looks as if there are free regions available on the
        // secondary_free_list. Let's move them to the free_list and try
        // again to allocate from it.
        append_secondary_free_list();
        assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
               "empty we should have moved at least one entry to the free_list");
        HeapRegion* res = _hrm.allocate_free_region(is_old);
!       if (G1ConcRegionFreeingVerbose) {
!         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                 "allocated " HR_FORMAT " from secondary_free_list",
                                 HR_FORMAT_PARAMS(res));
-       }
        return res;
      }
      // Wait here until we get notified either when (a) there are no
      // more free regions coming or (b) some regions have been moved on
      // the secondary_free_list.
      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
    }
!   if (G1ConcRegionFreeingVerbose) {
!     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                             "could not allocate from secondary_free_list");
-   }
    return NULL;
  }
  HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
    assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
--- 201,235 ----
  HeapRegion* G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
    while (!_secondary_free_list.is_empty() || free_regions_coming()) {
      if (!_secondary_free_list.is_empty()) {
!       log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                                  "secondary_free_list has %u entries",
                                  _secondary_free_list.length());
        // It looks as if there are free regions available on the
        // secondary_free_list. Let's move them to the free_list and try
        // again to allocate from it.
        append_secondary_free_list();
        assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
               "empty we should have moved at least one entry to the free_list");
        HeapRegion* res = _hrm.allocate_free_region(is_old);
!       log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                                  "allocated " HR_FORMAT " from secondary_free_list",
                                  HR_FORMAT_PARAMS(res));
        return res;
      }
      // Wait here until we get notified either when (a) there are no
      // more free regions coming or (b) some regions have been moved on
      // the secondary_free_list.
      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
    }
!   log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                              "could not allocate from secondary_free_list");
    return NULL;
  }
  HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
    assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
*** 244,285 ****
         "when we are allocating a single humongous region");
  
    HeapRegion* res;
    if (G1StressConcRegionFreeing) {
      if (!_secondary_free_list.is_empty()) {
!       if (G1ConcRegionFreeingVerbose) {
!         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                                 "forced to look at the secondary_free_list");
-       }
        res = new_region_try_secondary_free_list(is_old);
        if (res != NULL) {
          return res;
        }
      }
    }
  
    res = _hrm.allocate_free_region(is_old);
  
    if (res == NULL) {
!     if (G1ConcRegionFreeingVerbose) {
!       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                               "res == NULL, trying the secondary_free_list");
-     }
      res = new_region_try_secondary_free_list(is_old);
    }
    if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
      // Currently, only attempts to allocate GC alloc regions set
      // do_expand to true. So, we should only reach here during a
      // safepoint. If this assumption changes we might have to
      // reconsider the use of _expand_heap_after_alloc_failure.
      assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  
!     ergo_verbose1(ErgoHeapSizing,
!                   "attempt heap expansion",
!                   ergo_format_reason("region allocation request failed")
!                   ergo_format_byte("allocation request"),
                    word_size * HeapWordSize);
      if (expand(word_size * HeapWordSize)) {
        // Given that expand() succeeded in expanding the heap, and we
        // always expand the heap by an amount aligned to the heap
        // region size, the free list should in theory not be empty.
        // In either case allocate_free_region() will check for NULL.
--- 237,272 ----
         "when we are allocating a single humongous region");
  
    HeapRegion* res;
    if (G1StressConcRegionFreeing) {
      if (!_secondary_free_list.is_empty()) {
!       log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                                  "forced to look at the secondary_free_list");
        res = new_region_try_secondary_free_list(is_old);
        if (res != NULL) {
          return res;
        }
      }
    }
  
    res = _hrm.allocate_free_region(is_old);
  
    if (res == NULL) {
!     log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
                                "res == NULL, trying the secondary_free_list");
      res = new_region_try_secondary_free_list(is_old);
    }
    if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
      // Currently, only attempts to allocate GC alloc regions set
      // do_expand to true. So, we should only reach here during a
      // safepoint. If this assumption changes we might have to
      // reconsider the use of _expand_heap_after_alloc_failure.
      assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  
!     log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                word_size * HeapWordSize);
+ 
      if (expand(word_size * HeapWordSize)) {
        // Given that expand() succeeded in expanding the heap, and we
        // always expand the heap by an amount aligned to the heap
        // region size, the free list should in theory not be empty.
        // In either case allocate_free_region() will check for NULL.
*** 446,461 ****
    // If so, try expansion.
    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      // We found something. Make sure these regions are committed, i.e. expand
      // the heap. Alternatively we could do a defragmentation GC.
!     ergo_verbose1(ErgoHeapSizing,
!                   "attempt heap expansion",
!                   ergo_format_reason("humongous allocation request failed")
!                   ergo_format_byte("allocation request"),
                    word_size * HeapWordSize);
      _hrm.expand_at(first, obj_regions);
      g1_policy()->record_new_heap_size(num_regions());
  
  #ifdef ASSERT
      for (uint i = first; i < first + obj_regions; ++i) {
--- 433,446 ----
    // If so, try expansion.
    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
    if (first != G1_NO_HRM_INDEX) {
      // We found something. Make sure these regions are committed, i.e. expand
      // the heap. Alternatively we could do a defragmentation GC.
!     log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                word_size * HeapWordSize);
+ 
      _hrm.expand_at(first, obj_regions);
      g1_policy()->record_new_heap_size(num_regions());
  
  #ifdef ASSERT
      for (uint i = first; i < first + obj_regions; ++i) {
*** 769,783 ****
    if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
      return false;
    }
    increase_used(word_size * HeapWordSize);
    if (commits != 0) {
!     ergo_verbose1(ErgoHeapSizing,
!                   "attempt heap expansion",
!                   ergo_format_reason("allocate archive regions")
!                   ergo_format_byte("total size"),
                    HeapRegion::GrainWords * HeapWordSize * commits);
    }
  
    // Mark each G1 region touched by the range as archive, add it to the old set,
    // and set the allocation context and top.
    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
--- 754,766 ----
    if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
      return false;
    }
    increase_used(word_size * HeapWordSize);
    if (commits != 0) {
!     log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
                                HeapRegion::GrainWords * HeapWordSize * commits);
+ 
    }
  
    // Mark each G1 region touched by the range as archive, add it to the old set,
    // and set the allocation context and top.
    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
*** 954,967 ****
      // Notify mark-sweep that this is no longer an archive range.
      G1MarkSweep::set_range_archive(ranges[i], false);
    }
  
    if (uncommitted_regions != 0) {
!     ergo_verbose1(ErgoHeapSizing,
!                   "attempt heap shrinking",
!                   ergo_format_reason("uncommitted archive regions")
!                   ergo_format_byte("total size"),
                    HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
    }
    decrease_used(size_used);
  }
--- 937,947 ----
      // Notify mark-sweep that this is no longer an archive range.
      G1MarkSweep::set_range_archive(ranges[i], false);
    }
  
    if (uncommitted_regions != 0) {
!     log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
                                HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
    }
    decrease_used(size_used);
  }
*** 1195,1206 ****
--- 1175,1189 ----
  
    PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
      : _hr_printer(hr_printer) { }
  };
  
  void G1CollectedHeap::print_hrm_post_compaction() {
+   if (_hr_printer.is_active()) {
      PostCompactionPrinterClosure cl(hr_printer());
      heap_region_iterate(&cl);
+   }
+ }
  
  bool G1CollectedHeap::do_collection(bool explicit_gc,
                                      bool clear_all_soft_refs,
                                      size_t word_size) {
*** 1218,1228 ****
    gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
  
    SvcGCMarker sgcm(SvcGCMarker::FULL);
    ResourceMark rm;
  
-   G1Log::update_level();
    print_heap_before_gc();
    trace_heap_before_gc(gc_tracer);
  
    size_t metadata_prev_used = MetaspaceAux::used_bytes();
--- 1201,1210 ----
*** 1236,1249 ****
    {
      IsGCActiveMark x;
  
      // Timing
      assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
!     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  
      {
!       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
        TraceCollectorStats tcs(g1mm()->full_collection_counters());
        TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
  
        g1_policy()->record_full_collection_start();
--- 1218,1231 ----
    {
      IsGCActiveMark x;
  
      // Timing
      assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
!     GCTraceCPUTime tcpu;
  
      {
!       GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
        TraceCollectorStats tcs(g1mm()->full_collection_counters());
        TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
  
        g1_policy()->record_full_collection_start();
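The replacement above also shows the new scoped-timer idiom: rather than querying a log level and printing manually, an RAII object logs on scope entry and exit. A condensed sketch, with a hypothetical scope around the constructor calls copied from the hunk:

    {
      GCTraceCPUTime tcpu;  // reports user/sys/real CPU time when the scope ends
      {
        // Logs the "Pause Full" message with the GC cause at Info level on the
        // gc tag; argument order as in the hunk above.
        GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
        // ... the full collection runs inside this scope ...
      }
    }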
*** 1290,1304 ****
        // Make sure we'll choose a new allocation region afterwards.
        _allocator->release_mutator_alloc_region();
        _allocator->abandon_gc_alloc_regions();
        g1_rem_set()->cleanupHRRS();
  
-       // We should call this after we retire any currently active alloc
-       // regions so that all the ALLOC / RETIRE events are generated
-       // before the start GC event.
-       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
- 
        // We may have added regions to the current incremental collection
        // set between the last GC or pause and now. We need to clear the
        // incremental collection set and then start rebuilding it afresh
        // after this full GC.
        abandon_collection_set(g1_policy()->inc_cset_head());
--- 1272,1281 ----
*** 1362,1379 ****
        check_gc_time_stamps();
  
        // Resize the heap if necessary.
        resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  
-       if (_hr_printer.is_active()) {
        // We should do this after we potentially resize the heap so
        // that all the COMMIT / UNCOMMIT events are generated before
!       // the end GC event.
!       print_hrm_post_compaction();
-       _hr_printer.end_gc(true /* full */, (size_t) total_collections());
-       }
  
        G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
        if (hot_card_cache->use_cache()) {
          hot_card_cache->reset_card_counts();
          hot_card_cache->reset_hot_cache();
--- 1339,1352 ----
        check_gc_time_stamps();
  
        // Resize the heap if necessary.
        resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  
        // We should do this after we potentially resize the heap so
        // that all the COMMIT / UNCOMMIT events are generated before
!       // the compaction events.
        print_hrm_post_compaction();
  
        G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
        if (hot_card_cache->use_cache()) {
          hot_card_cache->reset_card_counts();
          hot_card_cache->reset_hot_cache();
*** 1438,1463 ****
      _allocator->init_mutator_alloc_region();
  
      g1_policy()->record_full_collection_end();
  
-     if (G1Log::fine()) {
-       g1_policy()->print_heap_transition();
-     }
- 
      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
      // before any GC notifications are raised.
      g1mm()->update_sizes();
  
      gc_epilogue(true);
    }
  
!   if (G1Log::finer()) {
!     g1_policy()->print_detailed_heap_transition(true /* full */);
!   }
  
    print_heap_after_gc();
    trace_heap_after_gc(gc_tracer);
  
    post_full_gc_dump(gc_timer);
--- 1411,1430 ----
      _allocator->init_mutator_alloc_region();
  
      g1_policy()->record_full_collection_end();
  
      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
      // before any GC notifications are raised.
      g1mm()->update_sizes();
  
      gc_epilogue(true);
    }
  
!   g1_policy()->print_detailed_heap_transition();
  
    print_heap_after_gc();
    trace_heap_after_gc(gc_tracer);
  
    post_full_gc_dump(gc_timer);
*** 1538,1571 ****
    maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
  
    if (capacity_after_gc < minimum_desired_capacity) {
      // Don't expand unless it's significant
      size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
!     ergo_verbose4(ErgoHeapSizing,
!                   "attempt heap expansion",
!                   ergo_format_reason("capacity lower than "
!                                      "min desired capacity after Full GC")
!                   ergo_format_byte("capacity")
!                   ergo_format_byte("occupancy")
!                   ergo_format_byte_perc("min desired capacity"),
!                   capacity_after_gc, used_after_gc,
!                   minimum_desired_capacity, (double) MinHeapFreeRatio);
      expand(expand_bytes);
  
      // No expansion, now see if we want to shrink
    } else if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
!     ergo_verbose4(ErgoHeapSizing,
!                   "attempt heap shrinking",
!                   ergo_format_reason("capacity higher than "
!                                      "max desired capacity after Full GC")
!                   ergo_format_byte("capacity")
!                   ergo_format_byte("occupancy")
!                   ergo_format_byte_perc("max desired capacity"),
!                   capacity_after_gc, used_after_gc,
!                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
      shrink(shrink_bytes);
    }
  }
  
  HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
--- 1505,1530 ----
    maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
  
    if (capacity_after_gc < minimum_desired_capacity) {
      // Don't expand unless it's significant
      size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
! 
!     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
!                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
!                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
! 
      expand(expand_bytes);
  
      // No expansion, now see if we want to shrink
    } else if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
! 
!     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
!                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
!                               capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
! 
      shrink(shrink_bytes);
    }
  }
  
  HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
*** 1668,1682 ****
    assert_at_safepoint(true /* should_be_vm_thread */);
  
    verify_region_sets_optional();
  
    size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
!   ergo_verbose1(ErgoHeapSizing,
!                 "attempt heap expansion",
!                 ergo_format_reason("allocation request failed")
!                 ergo_format_byte("allocation request"),
                  word_size * HeapWordSize);
    if (expand(expand_bytes)) {
      _hrm.verify_optional();
      verify_region_sets_optional();
      return attempt_allocation_at_safepoint(word_size,
                                             context,
--- 1627,1640 ----
    assert_at_safepoint(true /* should_be_vm_thread */);
  
    verify_region_sets_optional();
  
    size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
!   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
                              word_size * HeapWordSize);
+ 
+ 
    if (expand(expand_bytes)) {
      _hrm.verify_optional();
      verify_region_sets_optional();
      return attempt_allocation_at_safepoint(word_size,
                                             context,
*** 1687,1706 ****
  bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
    size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
    aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                         HeapRegion::GrainBytes);
!   ergo_verbose2(ErgoHeapSizing,
!                 "expand the heap",
!                 ergo_format_byte("requested expansion amount")
!                 ergo_format_byte("attempted expansion amount"),
                  expand_bytes, aligned_expand_bytes);
  
    if (is_maximal_no_gc()) {
!     ergo_verbose0(ErgoHeapSizing,
!                   "did not expand the heap",
!                   ergo_format_reason("heap already fully expanded"));
      return false;
    }
  
    double expand_heap_start_time_sec = os::elapsedTime();
    uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
--- 1645,1660 ----
  bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
    size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
    aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                         HeapRegion::GrainBytes);
! 
!   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
                              expand_bytes, aligned_expand_bytes);
  
    if (is_maximal_no_gc()) {
!     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
      return false;
    }
  
    double expand_heap_start_time_sec = os::elapsedTime();
    uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
*** 1714,1726 ****
  
    if (expanded_by > 0) {
      size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
      assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
      g1_policy()->record_new_heap_size(num_regions());
    } else {
!     ergo_verbose0(ErgoHeapSizing,
!                   "did not expand the heap",
!                   ergo_format_reason("heap expansion operation failed"));
      // The expansion of the virtual storage space was unsuccessful.
      // Let's see if it was because we ran out of swap.
      if (G1ExitOnExpansionFailure && _hrm.available() >= regions_to_expand) {
        // We had head room...
--- 1668,1679 ----
  
    if (expanded_by > 0) {
      size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
      assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
      g1_policy()->record_new_heap_size(num_regions());
    } else {
!     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
! 
      // The expansion of the virtual storage space was unsuccessful.
      // Let's see if it was because we ran out of swap.
      if (G1ExitOnExpansionFailure && _hrm.available() >= regions_to_expand) {
        // We had head room...
*** 1738,1759 ****
    uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
  
    uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
    size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
  
!   ergo_verbose3(ErgoHeapSizing,
!                 "shrink the heap",
!                 ergo_format_byte("requested shrinking amount")
!                 ergo_format_byte("aligned shrinking amount")
!                 ergo_format_byte("attempted shrinking amount"),
                  shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
    if (num_regions_removed > 0) {
      g1_policy()->record_new_heap_size(num_regions());
    } else {
!     ergo_verbose0(ErgoHeapSizing,
!                   "did not shrink the heap",
!                   ergo_format_reason("heap shrinking operation failed"));
    }
  }
  
  void G1CollectedHeap::shrink(size_t shrink_bytes) {
    verify_region_sets_optional();
--- 1691,1707 ----
    uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
  
    uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
    size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
  
! 
!   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
                  shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
    if (num_regions_removed > 0) {
      g1_policy()->record_new_heap_size(num_regions());
    } else {
!     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
    }
  }
  
  void G1CollectedHeap::shrink(size_t shrink_bytes) {
    verify_region_sets_optional();
*** 1861,1890 ****
                                               rs.alignment(),
                                               HeapRegion::GrainBytes,
                                               translation_factor,
                                               mtGC);
    if (TracePageSizes) {
!     gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
                             description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
    }
    return result;
  }
  
  jint G1CollectedHeap::initialize() {
    CollectedHeap::pre_initialize();
    os::enable_vtime();
  
-   G1Log::init();
- 
    // Necessary to satisfy locking discipline assertions.
    MutexLocker x(Heap_lock);
  
-   // We have to initialize the printer before committing the heap, as
-   // it will be used then.
-   _hr_printer.set_active(G1PrintHeapRegions);
- 
    // While there are no constraints in the GC code that HeapWordSize
    // be any particular value, there are multiple other areas in the
    // system which believe this to be true (e.g. oop->object_size in some
    // cases incorrectly returns the size in wordSize units rather than
    // HeapWordSize).
--- 1809,1832 ----
                                               rs.alignment(),
                                               HeapRegion::GrainBytes,
                                               translation_factor,
                                               mtGC);
    if (TracePageSizes) {
!     tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
                    description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
    }
    return result;
  }
  
  jint G1CollectedHeap::initialize() {
    CollectedHeap::pre_initialize();
    os::enable_vtime();
  
    // Necessary to satisfy locking discipline assertions.
    MutexLocker x(Heap_lock);
  
    // While there are no constraints in the GC code that HeapWordSize
    // be any particular value, there are multiple other areas in the
    // system which believe this to be true (e.g. oop->object_size in some
    // cases incorrectly returns the size in wordSize units rather than
    // HeapWordSize).
*** 2073,2083 ****
    return JNI_OK;
  }
  
  void G1CollectedHeap::stop() {
    // Stop all concurrent threads. We do this to make sure these threads
!   // do not continue to execute and access resources (e.g. gclog_or_tty)
    // that are destroyed during shutdown.
    _cg1r->stop();
    _cmThread->stop();
    if (G1StringDedup::is_enabled()) {
      G1StringDedup::stop();
--- 2015,2025 ----
    return JNI_OK;
  }
  
  void G1CollectedHeap::stop() {
    // Stop all concurrent threads. We do this to make sure these threads
!   // do not continue to execute and access resources (e.g. logging)
    // that are destroyed during shutdown.
    _cg1r->stop();
    _cmThread->stop();
    if (G1StringDedup::is_enabled()) {
      G1StringDedup::stop();
*** 2190,2201 ****
      _gc_time_stamp(gc_time_stamp), _failures(false) { }
  
    virtual bool doHeapRegion(HeapRegion* hr) {
      unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
      if (_gc_time_stamp != region_gc_time_stamp) {
!       gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
!                              "expected %d", HR_FORMAT_PARAMS(hr),
                               region_gc_time_stamp, _gc_time_stamp);
        _failures = true;
      }
      return false;
    }
--- 2132,2142 ----
      _gc_time_stamp(gc_time_stamp), _failures(false) { }
  
    virtual bool doHeapRegion(HeapRegion* hr) {
      unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
      if (_gc_time_stamp != region_gc_time_stamp) {
!       log_info(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
                             region_gc_time_stamp, _gc_time_stamp);
        _failures = true;
      }
      return false;
    }
*** 2779,2794 ****
    template <class T> void do_oop_nv(T* p) {
      T heap_oop = oopDesc::load_heap_oop(p);
      if (!oopDesc::is_null(heap_oop)) {
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        if (_g1h->is_obj_dead_cond(obj, _vo)) {
!         gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
!                                "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
          if (_vo == VerifyOption_G1UseMarkWord) {
!           gclog_or_tty->print_cr("  Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
          }
!         obj->print_on(gclog_or_tty);
          _failures = true;
        }
      }
    }
--- 2720,2736 ----
    template <class T> void do_oop_nv(T* p) {
      T heap_oop = oopDesc::load_heap_oop(p);
      if (!oopDesc::is_null(heap_oop)) {
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        if (_g1h->is_obj_dead_cond(obj, _vo)) {
!         LogHandle(gc, verify) log;
!         log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
          if (_vo == VerifyOption_G1UseMarkWord) {
!           log.info("  Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
          }
!         ResourceMark rm;
!         obj->print_on(log.info_stream());
          _failures = true;
        }
      }
    }
*** 2829,2839 ****
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      HeapRegionRemSet* hrrs = hr->rem_set();
      // Verify that the strong code root list for this region
      // contains the nmethod
      if (!hrrs->strong_code_roots_list_contains(_nm)) {
!       gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
                               "from nmethod " PTR_FORMAT " not in strong "
                               "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
                               p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
        _failures = true;
      }
--- 2771,2781 ----
      HeapRegion* hr = _g1h->heap_region_containing(obj);
      HeapRegionRemSet* hrrs = hr->rem_set();
      // Verify that the strong code root list for this region
      // contains the nmethod
      if (!hrrs->strong_code_roots_list_contains(_nm)) {
!       log_info(gc, verify)("Code root location " PTR_FORMAT " "
                             "from nmethod " PTR_FORMAT " not in strong "
                             "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
                             p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
        _failures = true;
      }
*** 3010,3025 ****
      } else if (!r->is_starts_humongous()) {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
        r->object_iterate(&not_dead_yet_cl);
        if (_vo != VerifyOption_G1UseNextMarking) {
          if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
!           gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
!                                  "max_live_bytes " SIZE_FORMAT " "
!                                  "< calculated " SIZE_FORMAT,
!                                  p2i(r->bottom()), p2i(r->end()),
!                                  r->max_live_bytes(),
!                                  not_dead_yet_cl.live_bytes());
            _failures = true;
          }
        } else {
          // When vo == UseNextMarking we cannot currently do a sanity
          // check on the live bytes as the calculation has not been
--- 2952,2963 ----
      } else if (!r->is_starts_humongous()) {
        VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
        r->object_iterate(&not_dead_yet_cl);
        if (_vo != VerifyOption_G1UseNextMarking) {
          if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
!           log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
!                                p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
            _failures = true;
          }
        } else {
          // When vo == UseNextMarking we cannot currently do a sanity
          // check on the live bytes as the calculation has not been
*** 3063,3078 ****
        _failures = true;
      }
    }
  };
  
! void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
!   if (SafepointSynchronize::is_at_safepoint()) {
      assert(Thread::current()->is_VM_thread(),
             "Expected to be executed serially by the VM thread at this point");
  
!     if (!silent) { gclog_or_tty->print("Roots "); }
      VerifyRootsClosure rootsCl(vo);
      VerifyKlassClosure klassCl(this, &rootsCl);
      CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
  
      // We apply the relevant closures to all the oops in the
--- 3001,3019 ----
        _failures = true;
      }
    }
  };
  
! void G1CollectedHeap::verify(VerifyOption vo) {
!   if (!SafepointSynchronize::is_at_safepoint()) {
!     log_info(gc, verify)("Skipping verification. Not at safepoint.");
!   }
! 
    assert(Thread::current()->is_VM_thread(),
           "Expected to be executed serially by the VM thread at this point");
  
!   log_debug(gc, verify)("Roots");
    VerifyRootsClosure rootsCl(vo);
    VerifyKlassClosure klassCl(this, &rootsCl);
    CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
  
    // We apply the relevant closures to all the oops in the
*** 3093,3107 ****
  
    if (vo != VerifyOption_G1UseMarkWord) {
      // If we're verifying during a full GC then the region sets
      // will have been torn down at the start of the GC. Therefore
      // verifying the region sets will fail. So we only verify
      // the region sets when not in a full GC.
!     if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
      verify_region_sets();
    }
  
!   if (!silent) { gclog_or_tty->print("HeapRegions "); }
    if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
      G1ParVerifyTask task(this, vo);
      workers()->run_task(&task);
      if (task.failures()) {
--- 3034,3048 ----
  
    if (vo != VerifyOption_G1UseMarkWord) {
      // If we're verifying during a full GC then the region sets
      // will have been torn down at the start of the GC. Therefore
      // verifying the region sets will fail. So we only verify
      // the region sets when not in a full GC.
!     log_debug(gc, verify)("HeapRegionSets");
      verify_region_sets();
    }
  
!   log_debug(gc, verify)("HeapRegions");
    if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
      G1ParVerifyTask task(this, vo);
      workers()->run_task(&task);
      if (task.failures()) {
*** 3115,3151 ****
        failures = true;
      }
    }
  
    if (G1StringDedup::is_enabled()) {
!     if (!silent) gclog_or_tty->print("StrDedup ");
      G1StringDedup::verify();
    }
  
    if (failures) {
!     gclog_or_tty->print_cr("Heap:");
      // It helps to have the per-region information in the output to
      // help us track down what went wrong. This is why we call
      // print_extended_on() instead of print_on().
!     print_extended_on(gclog_or_tty);
!     gclog_or_tty->cr();
!     gclog_or_tty->flush();
    }
    guarantee(!failures, "there should not have been any failures");
- } else {
-   if (!silent) {
-     gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
-     if (G1StringDedup::is_enabled()) {
-       gclog_or_tty->print(", StrDedup");
-     }
-     gclog_or_tty->print(") ");
-   }
- }
- }
- 
- void G1CollectedHeap::verify(bool silent) {
-   verify(silent, VerifyOption_G1UsePrevMarking);
- }
  
  double G1CollectedHeap::verify(bool guard, const char* msg) {
    double verify_time_ms = 0.0;
--- 3056,3079 ----
        failures = true;
      }
    }
  
    if (G1StringDedup::is_enabled()) {
!     log_debug(gc, verify)("StrDedup");
      G1StringDedup::verify();
    }
  
    if (failures) {
!     log_info(gc, verify)("Heap after failed verification:");
      // It helps to have the per-region information in the output to
      // help us track down what went wrong. This is why we call
      // print_extended_on() instead of print_on().
!     LogHandle(gc, verify) log;
!     ResourceMark rm;
!     print_extended_on(log.info_stream());
    }
    guarantee(!failures, "there should not have been any failures");
  }
  
  double G1CollectedHeap::verify(bool guard, const char* msg) {
    double verify_time_ms = 0.0;
*** 3159,3174 ****
    return verify_time_ms;
  }
  
  void G1CollectedHeap::verify_before_gc() {
!   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
    g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
  }
  
  void G1CollectedHeap::verify_after_gc() {
!   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
    g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
  }
  
  class PrintRegionClosure: public HeapRegionClosure {
    outputStream* _st;
--- 3087,3102 ----
    return verify_time_ms;
  }
  
  void G1CollectedHeap::verify_before_gc() {
!   double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
    g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
  }
  
  void G1CollectedHeap::verify_after_gc() {
!   double verify_time_ms = verify(VerifyAfterGC, "After GC");
    g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
  }
  
  class PrintRegionClosure: public HeapRegionClosure {
    outputStream* _st;
*** 3274,3289 ****
  
    if (TraceYoungGenTime || TraceOldGenTime) {
      // The "G1CollectorPolicy" is keeping track of these stats, so delegate
      // to that.
      g1_policy()->print_tracing_info();
    }
  
-   if (G1SummarizeRSetStats) {
    g1_rem_set()->print_summary_info();
-   }
  
-   if (G1SummarizeConcMark) {
    concurrent_mark()->print_summary_info();
-   }
  
    g1_policy()->print_yg_surv_rate_info();
  }
  
  #ifndef PRODUCT
  // Helpful for debugging RSet issues.
--- 3202,3213 ----
*** 3297,3328 ****
  
    bool doHeapRegion(HeapRegion* r) {
      HeapRegionRemSet* hrrs = r->rem_set();
      size_t occupied = hrrs->occupied();
      _occupied_sum += occupied;
  
!     gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
      if (occupied == 0) {
!       gclog_or_tty->print_cr("  RSet is empty");
      } else {
        hrrs->print();
      }
!     gclog_or_tty->print_cr("----------");
      return false;
    }
  
    PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
!     gclog_or_tty->cr();
!     gclog_or_tty->print_cr("========================================");
!     gclog_or_tty->print_cr("%s", msg);
!     gclog_or_tty->cr();
    }
  
    ~PrintRSetsClosure() {
!     gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
!     gclog_or_tty->print_cr("========================================");
!     gclog_or_tty->cr();
    }
  };
  
  void G1CollectedHeap::print_cset_rsets() {
    PrintRSetsClosure cl("Printing CSet RSets");
--- 3221,3252 ----
  
    bool doHeapRegion(HeapRegion* r) {
      HeapRegionRemSet* hrrs = r->rem_set();
      size_t occupied = hrrs->occupied();
      _occupied_sum += occupied;
  
!     tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
      if (occupied == 0) {
!       tty->print_cr("  RSet is empty");
      } else {
        hrrs->print();
      }
!     tty->print_cr("----------");
      return false;
    }
  
    PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
!     tty->cr();
!     tty->print_cr("========================================");
!     tty->print_cr("%s", msg);
!     tty->cr();
    }
  
    ~PrintRSetsClosure() {
!     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
!     tty->print_cr("========================================");
!     tty->cr();
    }
  };
  
  void G1CollectedHeap::print_cset_rsets() {
    PrintRSetsClosure cl("Printing CSet RSets");
*** 3376,3399 ****
    assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  
    // Fill TLAB's and such
    accumulate_statistics_all_tlabs();
    ensure_parsability(true);
  
!   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
!       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
!     g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
!   }
  }
  
  void G1CollectedHeap::gc_epilogue(bool full) {
- 
-   if (G1SummarizeRSetStats &&
-       (G1SummarizeRSetStatsPeriod > 0) &&
      // we are at the end of the GC. Total collections has already been increased.
!     ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
!     g1_rem_set()->print_periodic_summary_info("After GC RS summary");
!   }
  
    // FIXME: what is this about?
    // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
    // is set.
  #if defined(COMPILER2) || INCLUDE_JVMCI
--- 3300,3315 ----
    assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  
    // Fill TLAB's and such
    accumulate_statistics_all_tlabs();
    ensure_parsability(true);
  
!   g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
  }
  
  void G1CollectedHeap::gc_epilogue(bool full) {
    // we are at the end of the GC. Total collections has already been increased.
!   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
  
    // FIXME: what is this about?
    // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
    // is set.
  #if defined(COMPILER2) || INCLUDE_JVMCI
*** 3635,3645 ****
    st->print_raw_cr("GC Task Stats");
    st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
    st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
  }
  
! void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
    print_taskqueue_stats_hdr(st);
  
    TaskQueueStats totals;
    const uint n = num_task_queues();
    for (uint i = 0; i < n; ++i) {
--- 3551,3568 ----
    st->print_raw_cr("GC Task Stats");
    st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
    st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
  }
  
! void G1CollectedHeap::print_taskqueue_stats() const {
!   LogHandle(gc, task, stats) log;
!   if (!log.is_develop()) {
!     return;
!   }
!   ResourceMark rm;
!   outputStream* st = log.develop_stream();
! 
    print_taskqueue_stats_hdr(st);
  
    TaskQueueStats totals;
    const uint n = num_task_queues();
    for (uint i = 0; i < n; ++i) {
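The new print_taskqueue_stats() above also illustrates how code that still wants an outputStream borrows one from the log system: test the level first, then fetch the stream. A condensed sketch of that guard-then-stream pattern (the free function is hypothetical; is_develop() and develop_stream() are the accessors used in the hunk):

    void print_stats_if_enabled() {
      LogHandle(gc, task, stats) log;
      if (!log.is_develop()) {
        return;                          // level disabled: skip the work entirely
      }
      ResourceMark rm;                   // the stream may allocate resource memory
      outputStream* st = log.develop_stream();
      st->print_raw_cr("GC Task Stats"); // legacy outputStream printers still work
    }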
*** 3657,3702 ****
      task_queue(i)->stats.reset();
    }
  }
  #endif // TASKQUEUE_STATS
  
! void G1CollectedHeap::log_gc_header() {
!   if (!G1Log::fine()) {
!     return;
!   }
! 
!   gclog_or_tty->gclog_stamp();
! 
!   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
!     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
!     .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
! 
!   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
! }
! 
! void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
!   if (!G1Log::fine()) {
!     return;
!   }
! 
!   if (G1Log::finer()) {
      if (evacuation_failed()) {
!       gclog_or_tty->print(" (to-space exhausted)");
      }
!     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
      g1_policy()->print_phases(pause_time_sec);
      g1_policy()->print_detailed_heap_transition();
-   } else {
-     if (evacuation_failed()) {
-       gclog_or_tty->print("--");
-     }
-     g1_policy()->print_heap_transition();
-     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
-   }
-   gclog_or_tty->flush();
  }
  
  void G1CollectedHeap::wait_for_root_region_scanning() {
    double scan_wait_start = os::elapsedTime();
    // We have to wait until the CM threads finish scanning the
    // root regions as it's the only way to ensure that all the
    // objects on them have been correctly scanned before we start
--- 3580,3601 ----
      task_queue(i)->stats.reset();
    }
  }
  #endif // TASKQUEUE_STATS
  
! void G1CollectedHeap::log_gc_footer(double pause_time_counter) {
    if (evacuation_failed()) {
!     log_info(gc)("To-space exhausted");
    }
! 
!   double pause_time_sec = TimeHelper::counter_to_seconds(pause_time_counter);
    g1_policy()->print_phases(pause_time_sec);
+ 
    g1_policy()->print_detailed_heap_transition();
  }
+ 
  void G1CollectedHeap::wait_for_root_region_scanning() {
    double scan_wait_start = os::elapsedTime();
    // We have to wait until the CM threads finish scanning the
    // root regions as it's the only way to ensure that all the
    // objects on them have been correctly scanned before we start
*** 3727,3737 ****
    SvcGCMarker sgcm(SvcGCMarker::MINOR);
    ResourceMark rm;
  
    wait_for_root_region_scanning();
  
-   G1Log::update_level();
    print_heap_before_gc();
    trace_heap_before_gc(_gc_tracer_stw);
  
    verify_region_sets_optional();
    verify_dirty_young_regions();
--- 3626,3635 ----
*** 3764,3783 ****
      register_concurrent_cycle_start(_gc_timer_stw->gc_start());
    }
  
    _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
  
!   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  
    uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                                  workers()->active_workers(),
                                                                  Threads::number_of_non_daemon_threads());
    workers()->set_active_workers(active_workers);
  
    double pause_start_sec = os::elapsedTime();
    g1_policy()->note_gc_start(active_workers);
-   log_gc_header();
  
    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
    TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
  
    // If the secondary_free_list is not empty, append it to the
--- 3662,3691 ----
      register_concurrent_cycle_start(_gc_timer_stw->gc_start());
    }
  
    _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
  
!   GCTraceCPUTime tcpu;
  
    uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                                  workers()->active_workers(),
                                                                  Threads::number_of_non_daemon_threads());
    workers()->set_active_workers(active_workers);
  
+   FormatBuffer<> gc_string("Pause ");
+   if (collector_state()->during_initial_mark_pause()) {
+     gc_string.append("Initial Mark");
+   } else if (collector_state()->gcs_are_young()) {
+     gc_string.append("Young");
+   } else {
+     gc_string.append("Mixed");
+   }
+   GCTraceTime(Info, gc) tm5(gc_string, NULL, gc_cause(), true);
+ 
    double pause_start_sec = os::elapsedTime();
+   double pause_start_counter = os::elapsed_counter();
    g1_policy()->note_gc_start(active_workers);
  
    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
    TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
  
    // If the secondary_free_list is not empty, append it to the
*** 3827,3841 ****
      // Forget the current alloc region (we might even choose it to be part
      // of the collection set!).
      _allocator->release_mutator_alloc_region();
  
-     // We should call this after we retire the mutator alloc
-     // region(s) so that all the ALLOC / RETIRE events are generated
-     // before the start GC event.
-     _hr_printer.start_gc(false /* full */, (size_t) total_collections());
- 
      // This timing is only used by the ergonomics to handle our pause target.
      // It is unclear why this should not include the full pause. We will
      // investigate this in CR 7178365.
      //
      // Preserving the old comment here if that helps the investigation:
--- 3735,3744 ----
*** 3955,3965 ****
      {
        size_t expand_bytes = g1_policy()->expansion_amount();
        if (expand_bytes > 0) {
          size_t bytes_before = capacity();
!         // No need for an ergo verbose message here,
          // expansion_amount() does this when it returns a value > 0.
          double expand_ms;
          if (!expand(expand_bytes, &expand_ms)) {
            // We failed to expand the heap. Cannot do anything about it.
          }
--- 3858,3868 ----
      {
        size_t expand_bytes = g1_policy()->expansion_amount();
        if (expand_bytes > 0) {
          size_t bytes_before = capacity();
!         // No need for ergo logging here,
          // expansion_amount() does this when it returns a value > 0.
          double expand_ms;
          if (!expand(expand_bytes, &expand_ms)) {
            // We failed to expand the heap. Cannot do anything about it.
          }
*** 4015,4049 ****
        ref_processor_stw()->verify_no_references_recorded();
  
        // CM reference discovery will be re-enabled if necessary.
      }
  
-     // We should do this after we potentially expand the heap so
-     // that all the COMMIT events are generated before the end GC
-     // event, and after we retire the GC alloc regions so that all
-     // RETIRE events are generated before the end GC event.
-     _hr_printer.end_gc(false /* full */, (size_t) total_collections());
- 
  #ifdef TRACESPINNING
      ParallelTaskTerminator::print_termination_counts();
  #endif
  
      gc_epilogue(false);
    }
  
    // Print the remainder of the GC log output.
!   log_gc_footer(os::elapsedTime() - pause_start_sec);
  
    // It is not yet to safe to tell the concurrent mark to
    // start as we have some optional output below. We don't want the
    // output from the concurrent mark thread interfering with this
    // logging output either.
  
    _hrm.verify_optional();
    verify_region_sets_optional();
  
!   TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  
    print_heap_after_gc();
    trace_heap_after_gc(_gc_tracer_stw);
--- 3918,3946 ----
        ref_processor_stw()->verify_no_references_recorded();
  
        // CM reference discovery will be re-enabled if necessary.
      }
  
  #ifdef TRACESPINNING
      ParallelTaskTerminator::print_termination_counts();
  #endif
  
      gc_epilogue(false);
    }
  
    // Print the remainder of the GC log output.
!   log_gc_footer(os::elapsed_counter() - pause_start_counter);
  
    // It is not yet to safe to tell the concurrent mark to
    // start as we have some optional output below. We don't want the
    // output from the concurrent mark thread interfering with this
    // logging output either.
  
    _hrm.verify_optional();
    verify_region_sets_optional();
  
!   TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  
    print_heap_after_gc();
    trace_heap_after_gc(_gc_tracer_stw);
*** 4230,4246 ****
      _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
    }
    assert(pss->queue_is_empty(), "should be empty");
  
!   if (PrintTerminationStats) {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      size_t lab_waste;
      size_t lab_undo_waste;
      pss->waste(lab_waste, lab_undo_waste);
!     _g1h->print_termination_stats(gclog_or_tty,
!                                   worker_id,
                                    (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
                                    strong_roots_sec * 1000.0,                  /* strong roots time */
                                    term_sec * 1000.0,                          /* evac term time */
                                    evac_term_attempts,                         /* evac term attempts */
                                    lab_waste,                                  /* alloc buffer waste */
--- 4127,4142 ----
      _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
    }
    assert(pss->queue_is_empty(), "should be empty");
  
!   if (log_is_enabled(Debug, gc, task, stats)) {
      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
      size_t lab_waste;
      size_t lab_undo_waste;
      pss->waste(lab_waste, lab_undo_waste);
!     _g1h->print_termination_stats(worker_id,
                                    (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
                                    strong_roots_sec * 1000.0,                  /* strong roots time */
                                    term_sec * 1000.0,                          /* evac term time */
                                    evac_term_attempts,                         /* evac term attempts */
                                    lab_waste,                                  /* alloc buffer waste */
*** 4254,4279 ****
    }
  
    _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
  }
  };
  
! void G1CollectedHeap::print_termination_stats_hdr(outputStream* const st) {
!   st->print_raw_cr("GC Termination Stats");
!   st->print_raw_cr("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
!   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts  total   alloc    undo");
!   st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
  }
  
! void G1CollectedHeap::print_termination_stats(outputStream* const st,
!                                               uint worker_id,
                                                double elapsed_ms,
                                                double strong_roots_ms,
                                                double term_ms,
                                                size_t term_attempts,
                                                size_t alloc_buffer_waste,
                                                size_t undo_waste) const {
!   st->print_cr("%3d %9.2f %9.2f %6.2f "
                 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
                 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
                 worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, term_attempts,
                 (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
--- 4150,4179 ----
    }
  
    _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
  }
  };
  
! void G1CollectedHeap::print_termination_stats_hdr() {
!   LogHandle(gc, task, stats) log;
!   if (!log.is_debug()) {
!     return;
!   }
!   log.debug("GC Termination Stats");
!   log.debug("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
!   log.debug("thr     ms        ms      %%       ms      %%   attempts  total   alloc    undo");
!   log.debug("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
  }
  
! void G1CollectedHeap::print_termination_stats(uint worker_id,
                                                double elapsed_ms,
                                                double strong_roots_ms,
                                                double term_ms,
                                                size_t term_attempts,
                                                size_t alloc_buffer_waste,
                                                size_t undo_waste) const {
!   log_debug(gc, task, stats)
!            ("%3d %9.2f %9.2f %6.2f "
              "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
              SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
              worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
              term_ms, term_ms * 100 / elapsed_ms, term_attempts,
              (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
*** 4318,4335 ****
              StringTable::parallel_claimed_index(), _initial_string_table_size);
  
    guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
              "claim value %d after unlink less than initial symbol table size %d",
              SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
  
!   if (G1TraceStringSymbolTableScrubbing) {
!     gclog_or_tty->print_cr("Cleaned string and symbol table, "
                             "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
                             "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
                             strings_processed(), strings_removed(),
                             symbols_processed(), symbols_removed());
    }
- }
  
    void work(uint worker_id) {
      int strings_processed = 0;
      int strings_removed = 0;
      int symbols_processed = 0;
--- 4218,4233 ----
              StringTable::parallel_claimed_index(), _initial_string_table_size);
  
    guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
              "claim value %d after unlink less than initial symbol table size %d",
              SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
  
!   log_trace(gc, stringdedup)("Cleaned string and symbol table, "
                             "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
                             "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
                             strings_processed(), strings_removed(),
                             symbols_processed(), symbols_removed());
  }
  
    void work(uint worker_id) {
      int strings_processed = 0;
      int strings_removed = 0;
      int symbols_processed = 0;
*** 5152,5165 ****
      // InitialMark needs claim bits to keep track of the marked-through CLDs.
      if (collector_state()->during_initial_mark_pause()) {
        ClassLoaderDataGraph::clear_claimed_marks();
      }
  
!     // The individual threads will set their evac-failure closures.
!     if (PrintTerminationStats) {
!       print_termination_stats_hdr(gclog_or_tty);
!     }
  
      workers()->run_task(&g1_par_task);
      end_par_time_sec = os::elapsedTime();
  
      // Closing the inner scope will execute the destructor
--- 5050,5060 ----
      // InitialMark needs claim bits to keep track of the marked-through CLDs.
      if (collector_state()->during_initial_mark_pause()) {
        ClassLoaderDataGraph::clear_claimed_marks();
      }
  
!     print_termination_stats_hdr();
  
      workers()->run_task(&g1_par_task);
      end_par_time_sec = os::elapsedTime();
  
      // Closing the inner scope will execute the destructor
*** 5383,5397 ****
                                         HeapWord* tams, HeapWord* end) {
    guarantee(tams <= end,
              "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
    HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
    if (result < end) {
!     gclog_or_tty->cr();
!     gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
!                            bitmap_name, p2i(result));
!     gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
!                            bitmap_name, p2i(tams), p2i(end));
      return false;
    }
    return true;
  }
--- 5278,5289 ----
                                         HeapWord* tams, HeapWord* end) {
    guarantee(tams <= end,
              "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
    HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
    if (result < end) {
!     log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
!     log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
      return false;
    }
    return true;
  }
*** 5412,5424 ****
    // if we happen to be in that state.
    if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
      res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
    }
    if (!res_p || !res_n) {
!     gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
!                            HR_FORMAT_PARAMS(hr));
!     gclog_or_tty->print_cr("#### Caller: %s", caller);
      return false;
    }
    return true;
  }
--- 5304,5315 ----
    // if we happen to be in that state.
    if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
      res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
    }
    if (!res_p || !res_n) {
!     log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
!     log_info(gc, verify)("#### Caller: %s", caller);
      return false;
    }
    return true;
  }
*** 5466,5510 ****
    virtual bool doHeapRegion(HeapRegion* hr) {
      uint i = hr->hrm_index();
      InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
      if (hr->is_humongous()) {
        if (hr->in_collection_set()) {
!         gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
          _failures = true;
          return true;
        }
        if (cset_state.is_in_cset()) {
!         gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (hr->is_continues_humongous() && cset_state.is_humongous()) {
!         gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
          _failures = true;
          return true;
        }
      } else {
        if (cset_state.is_humongous()) {
!         gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (hr->in_collection_set() != cset_state.is_in_cset()) {
!         gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u", hr->in_collection_set(), cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (cset_state.is_in_cset()) {
          if (hr->is_young() != (cset_state.is_young())) {
!           gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u", hr->is_young(), cset_state.value(), i);
            _failures = true;
            return true;
          }
          if (hr->is_old() != (cset_state.is_old())) {
!           gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u", hr->is_old(), cset_state.value(), i);
            _failures = true;
            return true;
          }
        }
--- 5357,5401 ----
    virtual bool doHeapRegion(HeapRegion* hr) {
      uint i = hr->hrm_index();
      InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
      if (hr->is_humongous()) {
        if (hr->in_collection_set()) {
!         log_info(gc, verify)("\n## humongous region %u in CSet", i);
          _failures = true;
          return true;
        }
        if (cset_state.is_in_cset()) {
!         log_info(gc, verify)("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (hr->is_continues_humongous() && cset_state.is_humongous()) {
!         log_info(gc, verify)("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
          _failures = true;
          return true;
        }
      } else {
        if (cset_state.is_humongous()) {
!         log_info(gc, verify)("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (hr->in_collection_set() != cset_state.is_in_cset()) {
!         log_info(gc, verify)("\n## in CSet %d / cset state %d inconsistency for region %u", hr->in_collection_set(), cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (cset_state.is_in_cset()) {
          if (hr->is_young() != (cset_state.is_young())) {
!           log_info(gc, verify)("\n## is_young %d / cset state %d inconsistency for region %u", hr->is_young(), cset_state.value(), i);
            _failures = true;
            return true;
          }
          if (hr->is_old() != (cset_state.is_old())) {
!           log_info(gc, verify)("\n## is_old %d / cset state %d inconsistency for region %u", hr->is_old(), cset_state.value(), i);
            _failures = true;
            return true;
          }
        }
*** 5710,5752 ****
      // required because stale remembered sets might reference locations that
      // are currently allocated into.
      uint region_idx = r->hrm_index();
      if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
          !r->rem_set()->is_empty()) {
! 
!       if (G1TraceEagerReclaimHumongousObjects) {
!         gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                                 region_idx,
                                 (size_t)obj->size() * HeapWordSize,
                                 p2i(r->bottom()),
                                 r->rem_set()->occupied(),
                                 r->rem_set()->strong_code_roots_list_length(),
                                 next_bitmap->isMarked(r->bottom()),
                                 g1h->is_humongous_reclaim_candidate(region_idx),
                                 obj->is_typeArray()
                                );
-       }
- 
        return false;
      }
  
      guarantee(obj->is_typeArray(),
                "Only eagerly reclaiming type arrays is supported, but the object "
                PTR_FORMAT " is not.", p2i(r->bottom()));
  
!     if (G1TraceEagerReclaimHumongousObjects) {
!       gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                               region_idx,
                               (size_t)obj->size() * HeapWordSize,
                               p2i(r->bottom()),
                               r->rem_set()->occupied(),
                               r->rem_set()->strong_code_roots_list_length(),
                               next_bitmap->isMarked(r->bottom()),
                               g1h->is_humongous_reclaim_candidate(region_idx),
                               obj->is_typeArray()
                              );
!     }
  
      // Need to clear mark bit of the humongous object if already set.
      if (next_bitmap->isMarked(r->bottom())) {
        next_bitmap->clear(r->bottom());
      }
      do {
--- 5601,5638 ----
      // required because stale remembered sets might reference locations that
      // are currently allocated into.
      uint region_idx = r->hrm_index();
      if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
          !r->rem_set()->is_empty()) {
!       log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                                 region_idx,
                                 (size_t)obj->size() * HeapWordSize,
                                 p2i(r->bottom()),
                                 r->rem_set()->occupied(),
                                 r->rem_set()->strong_code_roots_list_length(),
                                 next_bitmap->isMarked(r->bottom()),
                                 g1h->is_humongous_reclaim_candidate(region_idx),
                                 obj->is_typeArray()
                                );
        return false;
      }
  
      guarantee(obj->is_typeArray(),
                "Only eagerly reclaiming type arrays is supported, but the object "
                PTR_FORMAT " is not.", p2i(r->bottom()));
  
!     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                               region_idx,
                               (size_t)obj->size() * HeapWordSize,
                               p2i(r->bottom()),
                               r->rem_set()->occupied(),
                               r->rem_set()->strong_code_roots_list_length(),
                               next_bitmap->isMarked(r->bottom()),
                               g1h->is_humongous_reclaim_candidate(region_idx),
                               obj->is_typeArray()
                              );
! 
      // Need to clear mark bit of the humongous object if already set.
      if (next_bitmap->isMarked(r->bottom())) {
        next_bitmap->clear(r->bottom());
      }
      do {
*** 5776,5786 ****
  void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
    assert_at_safepoint(true);
  
    if (!G1EagerReclaimHumongousObjects ||
!       (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
      g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
      return;
    }
  
    double start_time = os::elapsedTime();
--- 5662,5672 ----
  void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
    assert_at_safepoint(true);
  
    if (!G1EagerReclaimHumongousObjects ||
!       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
      g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
      return;
    }
  
    double start_time = os::elapsedTime();
*** 5829,5842 ****
      cur = next;
    }
  }
  
  void G1CollectedHeap::set_free_regions_coming() {
!   if (G1ConcRegionFreeingVerbose) {
!     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
                             "setting free regions coming");
-   }
  
    assert(!free_regions_coming(), "pre-condition");
    _free_regions_coming = true;
  }
--- 5715,5726 ----
      cur = next;
    }
  }
  
  void G1CollectedHeap::set_free_regions_coming() {
!   log_develop(gc, freelist)("G1ConcRegionFreeing [cm thread] : "
                              "setting free regions coming");
  
    assert(!free_regions_coming(), "pre-condition");
    _free_regions_coming = true;
  }
*** 5847,5885 ****
    {
      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
      _free_regions_coming = false;
      SecondaryFreeList_lock->notify_all();
    }
  
!   if (G1ConcRegionFreeingVerbose) {
!     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
                             "reset free regions coming");
-   }
  }
  
  void G1CollectedHeap::wait_while_free_regions_coming() {
    // Most of the time we won't have to wait, so let's do a quick test
    // first before we take the lock.
    if (!free_regions_coming()) {
      return;
    }
  
!   if (G1ConcRegionFreeingVerbose) {
!     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
                             "waiting for free regions");
-   }
  
    {
      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
      while (free_regions_coming()) {
        SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  
!   if (G1ConcRegionFreeingVerbose) {
!     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
                             "done waiting for free regions");
-   }
  }
  
  bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
    return _allocator->is_retained_old_region(hr);
  }
--- 5731,5763 ----
    {
      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
      _free_regions_coming = false;
      SecondaryFreeList_lock->notify_all();
    }
  
!   log_develop(gc, freelist)("G1ConcRegionFreeing [cm thread] : "
                              "reset free regions coming");
  }
  
  void G1CollectedHeap::wait_while_free_regions_coming() {
    // Most of the time we won't have to wait, so let's do a quick test
    // first before we take the lock.
    if (!free_regions_coming()) {
      return;
    }
  
!   log_develop(gc, freelist)("G1ConcRegionFreeing [other] : "
                              "waiting for free regions");
  
    {
      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
      while (free_regions_coming()) {
        SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  
!   log_develop(gc, freelist)("G1ConcRegionFreeing [other] : "
                              "done waiting for free regions");
  }
  
  bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
    return _allocator->is_retained_old_region(hr);
  }
*** 5893,5903 ****
    bool _success;
  public:
    NoYoungRegionsClosure() : _success(true) { }
    bool doHeapRegion(HeapRegion* r) {
      if (r->is_young()) {
!       gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
                               p2i(r->bottom()), p2i(r->end()));
        _success = false;
      }
      return false;
    }
--- 5771,5781 ----
    bool _success;
  public:
    NoYoungRegionsClosure() : _success(true) { }
    bool doHeapRegion(HeapRegion* r) {
      if (r->is_young()) {
!       log_info(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
                             p2i(r->bottom()), p2i(r->end()));
        _success = false;
      }
      return false;
    }
*** 6144,6157 ****
    bool expanded = false;
    uint index = _hrm.find_highest_free(&expanded);
  
    if (index != G1_NO_HRM_INDEX) {
      if (expanded) {
!       ergo_verbose1(ErgoHeapSizing,
!                     "attempt heap expansion",
!                     ergo_format_reason("requested address range outside heap bounds")
!                     ergo_format_byte("region size"),
                      HeapRegion::GrainWords * HeapWordSize);
      }
      _hrm.allocate_free_regions_starting_at(index, 1);
      return region_at(index);
    }
--- 6022,6032 ----
    bool expanded = false;
    uint index = _hrm.find_highest_free(&expanded);
  
    if (index != G1_NO_HRM_INDEX) {
      if (expanded) {
!       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
                                  HeapRegion::GrainWords * HeapWordSize);
      }
      _hrm.allocate_free_regions_starting_at(index, 1);
      return region_at(index);
    }