src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 4466 : 8010463: G1: Crashes with -UseTLAB and heap verification
Summary: Some parts of the G1 heap can only be walked during a safepoint. Skip verifying these parts of the heap when verifying during JVM startup.
Reviewed-by: brutisso, tschatzl
rev 4467 : 8010780: G1: Eden occupancy/capacity output wrong after a full GC
Summary: Move the calculation and recording of eden capacity to the start of a GC and print a detailed heap transition for full GCs.
Reviewed-by: tschatzl, jmasa
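
Background for the 8010780 hunks below, as a minimal stand-alone sketch (stand-in types, not the real G1CollectorPolicy): per the summary above, the "before" heap figures are now sampled once, when the GC starts, and the policy prints the transition from those recorded values instead of the caller recomputing used()/capacity() after the collection has already resized the young generation (the old g1h_prev_used/print_size_transition path).

// Stand-alone sketch; PolicySketch is illustrative, not the real G1CollectorPolicy.
#include <cstdio>
#include <cstddef>

struct PolicySketch {
  size_t _eden_capacity_before_bytes;   // recorded at the start of the GC
  size_t _used_before_bytes;

  void record_collection_start(size_t eden_capacity, size_t used) {
    // Record everything the transition needs before the collection resizes
    // eden; moving this recording to the start of the GC is what the
    // summary above describes.
    _eden_capacity_before_bytes = eden_capacity;
    _used_before_bytes          = used;
  }

  void print_heap_transition(size_t used_after) const {
    printf("eden capacity before: %zuM, heap: %zuM->%zuM\n",
           _eden_capacity_before_bytes >> 20,
           _used_before_bytes >> 20, used_after >> 20);
  }
};

int main() {
  PolicySketch policy;
  policy.record_collection_start(64u << 20, 96u << 20);  // sampled at GC start
  // ... full collection runs, eden gets resized ...
  policy.print_heap_transition(32u << 20);               // uses recorded values
  return 0;
}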

*** 1,7 ****
  /*
! * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
! * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 1311,1320 ****
--- 1311,1321 ----
      // Timing
      assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
      gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
      TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);

+   {
      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
      TraceCollectorStats tcs(g1mm()->full_collection_counters());
      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());

      double start = os::elapsedTime();
*** 1336,1346 ****
      gc_prologue(true);
      increment_total_collections(true /* full gc */);
      increment_old_marking_cycles_started();

-     size_t g1h_prev_used = used();
      assert(used() == recalculate_used(), "Should be equal");

      verify_before_gc();

      pre_full_gc_dump(gc_timer);
--- 1337,1346 ----
*** 1480,1546 ****
      } else {
        RebuildRSOutOfRegionClosure rebuild_rs(this);
        heap_region_iterate(&rebuild_rs);
      }

-     if (G1Log::fine()) {
-       print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
-     }
-
      if (true) { // FIXME
        // Ask the permanent generation to adjust size for full collections
        perm()->compute_new_size();
      }

-     // Start a new incremental collection set for the next pause
-     assert(g1_policy()->collection_set() == NULL, "must be");
-     g1_policy()->start_incremental_cset_building();
-
-     // Clear the _cset_fast_test bitmap in anticipation of adding
-     // regions to the incremental collection set for the next
-     // evacuation pause.
-     clear_cset_fast_test();
-
-     init_mutator_alloc_region();
-
-     double end = os::elapsedTime();
-     g1_policy()->record_full_collection_end();
-
  #ifdef TRACESPINNING
      ParallelTaskTerminator::print_termination_counts();
  #endif

-     gc_epilogue(true);
-
      // Discard all rset updates
      JavaThread::dirty_card_queue_set().abandon_logs();
      assert(!G1DeferredRSUpdate
!            || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");

      _young_list->reset_sampled_info();
      // At this point there should be no regions in the
      // entire heap tagged as young.
!     assert( check_young_list_empty(true /* check_heap */),
        "young list should be empty at this point");

      // Update the number of full collections that have been completed.
      increment_old_marking_cycles_completed(false /* concurrent */);

      _hrs.verify_optional();
      verify_region_sets_optional();

-     print_heap_after_gc();
-     trace_heap_after_gc(gc_tracer);
-
      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
      // before any GC notifications are raised.
      g1mm()->update_sizes();
    }

    post_full_gc_dump(gc_timer);

    gc_timer->register_gc_end(os::elapsed_counter());
    gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
--- 1480,1552 ----
      } else {
        RebuildRSOutOfRegionClosure rebuild_rs(this);
        heap_region_iterate(&rebuild_rs);
      }

      if (true) { // FIXME
        // Ask the permanent generation to adjust size for full collections
        perm()->compute_new_size();
      }

  #ifdef TRACESPINNING
      ParallelTaskTerminator::print_termination_counts();
  #endif

      // Discard all rset updates
      JavaThread::dirty_card_queue_set().abandon_logs();
      assert(!G1DeferredRSUpdate
!            || (G1DeferredRSUpdate &&
!                (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");

      _young_list->reset_sampled_info();
      // At this point there should be no regions in the
      // entire heap tagged as young.
!     assert(check_young_list_empty(true /* check_heap */),
        "young list should be empty at this point");

      // Update the number of full collections that have been completed.
      increment_old_marking_cycles_completed(false /* concurrent */);

      _hrs.verify_optional();
      verify_region_sets_optional();

      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
      // before any GC notifications are raised.
      g1mm()->update_sizes();
+
+     // Start a new incremental collection set for the next pause
+     assert(g1_policy()->collection_set() == NULL, "must be");
+     g1_policy()->start_incremental_cset_building();
+
+     // Clear the _cset_fast_test bitmap in anticipation of adding
+     // regions to the incremental collection set for the next
+     // evacuation pause.
+     clear_cset_fast_test();
+
+     init_mutator_alloc_region();
+
+     double end = os::elapsedTime();
+     g1_policy()->record_full_collection_end();
+
+     if (G1Log::fine()) {
+       g1_policy()->print_heap_transition();
+     }
+
+     gc_epilogue(true);
+   }
+
+   if (G1Log::finer()) {
+     g1_policy()->print_detailed_heap_transition();
    }
+
+   print_heap_after_gc();
+   trace_heap_after_gc(gc_tracer);
+
    post_full_gc_dump(gc_timer);
+   }

    gc_timer->register_gc_end(os::elapsed_counter());
    gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
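
To make the end result of the hunk above easier to follow, here is a condensed paraphrase of how the full-GC logging now nests, using stub types so the sketch is self-contained; it is not the real G1CollectedHeap::do_collection(), and the printed strings are placeholders rather than G1's actual log format.

// Condensed paraphrase of the post-patch structure, with stand-in types.
#include <cstdio>

struct GCTraceTimeStub {                 // stands in for GCTraceTime
  GCTraceTimeStub()  { puts("open 'Full GC' log entry"); }
  ~GCTraceTimeStub() { puts("close 'Full GC' log entry with elapsed time"); }
};

struct PolicyStub {                      // stands in for g1_policy()
  void record_full_collection_end()     { puts("record pause timing"); }
  void print_heap_transition()          { puts("brief used-before/after line"); }
  void print_detailed_heap_transition() { puts("detailed eden/heap transition"); }
};

int main() {
  const bool log_fine = true, log_finer = true;   // stand-ins for G1Log::fine()/finer()
  PolicyStub policy;
  {
    GCTraceTimeStub t;                   // the "+ {" added before GCTraceTime
    {
      // ... the collection proper, then g1mm()->update_sizes() ...
      policy.record_full_collection_end();
      if (log_fine)  { policy.print_heap_transition(); }
      // gc_epilogue(true);
    }                                    // the "+ }" added after gc_epilogue(true)
    if (log_finer) { policy.print_detailed_heap_transition(); }
    // print_heap_after_gc(); trace_heap_after_gc(...); post_full_gc_dump(...);
  }                                      // the "+ }" added after post_full_gc_dump
  return 0;
}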
*** 3916,3935 ****
      // Preserving the old comment here if that helps the investigation:
      //
      // The elapsed time induced by the start time below deliberately elides
      // the possible verification above.
      double sample_start_time_sec = os::elapsedTime();
-     size_t start_used_bytes = used();

  #if YOUNG_LIST_VERBOSE
      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
      _young_list->print();
      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  #endif // YOUNG_LIST_VERBOSE

!     g1_policy()->record_collection_pause_start(sample_start_time_sec,
!                                                start_used_bytes);

      double scan_wait_start = os::elapsedTime();
      // We have to wait until the CM threads finish scanning the
      // root regions as it's the only way to ensure that all the
      // objects on them have been correctly scanned before we start
--- 3922,3939 ----
      // Preserving the old comment here if that helps the investigation:
      //
      // The elapsed time induced by the start time below deliberately elides
      // the possible verification above.
      double sample_start_time_sec = os::elapsedTime();

  #if YOUNG_LIST_VERBOSE
      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
      _young_list->print();
      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  #endif // YOUNG_LIST_VERBOSE

!     g1_policy()->record_collection_pause_start(sample_start_time_sec);

      double scan_wait_start = os::elapsedTime();
      // We have to wait until the CM threads finish scanning the
      // root regions as it's the only way to ensure that all the
      // objects on them have been correctly scanned before we start
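
The other half of this signature change lives in g1CollectorPolicy.*, which is not part of this excerpt; presumably record_collection_pause_start() now samples the heap itself at the start of the pause instead of taking start_used_bytes from the caller. A hedged sketch of that shape, with stand-in types and illustrative member names:

// Hypothetical sketch of the policy-side counterpart; the names below are
// illustrative stand-ins, not the real G1CollectorPolicy members.
#include <cstdio>
#include <cstddef>

struct HeapStub {
  size_t used() const { return 64u << 20; }   // stand-in for G1CollectedHeap::used()
};

struct PolicySketch {
  HeapStub* _g1;
  size_t _used_at_pause_start_bytes;          // illustrative field name

  // Old shape: record_collection_pause_start(start_time_sec, start_used_bytes).
  // New shape: the used/eden figures are sampled here, at the start of the pause.
  void record_collection_pause_start(double /* start_time_sec */) {
    _used_at_pause_start_bytes = _g1->used();
    // ... remaining pause-start bookkeeping unchanged ...
  }
};

int main() {
  HeapStub heap;
  PolicySketch policy = { &heap, 0 };
  policy.record_collection_pause_start(0.0);
  printf("used at pause start: %zuM\n", policy._used_at_pause_start_bytes >> 20);
  return 0;
}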