src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp


*** 1,7 ****
  /*
! * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
! * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 1311,1320 ****
--- 1311,1321 ----
    // Timing
    assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");

    gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
    TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
+   {
    GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
    TraceCollectorStats tcs(g1mm()->full_collection_counters());
    TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());

    double start = os::elapsedTime();
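Note: the `+ {` opens a scope whose matching closing brace is added at the end of the 1480 hunk below, after post_full_gc_dump(). The point of the brace is RAII: GCTraceTime, TraceCollectorStats, and TraceMemoryManagerStats log and report from their destructors, so the new scope pins exactly when that happens. A minimal, self-contained sketch of the pattern, with illustrative names rather than HotSpot's:

#include <chrono>
#include <cstdio>

// Stand-in for GCTraceTime: logs elapsed time from its destructor.
class ScopedTraceTime {
  const char* _label;
  std::chrono::steady_clock::time_point _start;
 public:
  explicit ScopedTraceTime(const char* label)
    : _label(label), _start(std::chrono::steady_clock::now()) {}
  ~ScopedTraceTime() {
    double secs = std::chrono::duration<double>(
        std::chrono::steady_clock::now() - _start).count();
    std::printf("[%s, %.7f secs]\n", _label, secs);
  }
};

void full_collection() {
  {                               // mirrors the "+ {" added above
    ScopedTraceTime t("Full GC");
    // ... collection work runs inside the timed scope ...
  }                               // destructor logs here, not at method end
  std::printf("reporting that must follow the timer's output\n");
}

int main() { full_collection(); return 0; }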
*** 1336,1346 ****
    gc_prologue(true);
    increment_total_collections(true /* full gc */);
    increment_old_marking_cycles_started();

-   size_t g1h_prev_used = used();
    assert(used() == recalculate_used(), "Should be equal");

    verify_before_gc();

    pre_full_gc_dump(gc_timer);
--- 1337,1346 ----
*** 1480,1546 ****
    } else {
      RebuildRSOutOfRegionClosure rebuild_rs(this);
      heap_region_iterate(&rebuild_rs);
    }

-   if (G1Log::fine()) {
-     print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
-   }
-
    if (true) { // FIXME
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
    }

-   // Start a new incremental collection set for the next pause
-   assert(g1_policy()->collection_set() == NULL, "must be");
-   g1_policy()->start_incremental_cset_building();
-
-   // Clear the _cset_fast_test bitmap in anticipation of adding
-   // regions to the incremental collection set for the next
-   // evacuation pause.
-   clear_cset_fast_test();
-
-   init_mutator_alloc_region();
-
-   double end = os::elapsedTime();
-   g1_policy()->record_full_collection_end();
-
  #ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
  #endif

-   gc_epilogue(true);
-
    // Discard all rset updates
    JavaThread::dirty_card_queue_set().abandon_logs();
    assert(!G1DeferredRSUpdate
!          || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)),
           "Should not be any");

    _young_list->reset_sampled_info();
    // At this point there should be no regions in the
    // entire heap tagged as young.
!   assert( check_young_list_empty(true /* check_heap */),
           "young list should be empty at this point");

    // Update the number of full collections that have been completed.
    increment_old_marking_cycles_completed(false /* concurrent */);

    _hrs.verify_optional();
    verify_region_sets_optional();

!   print_heap_after_gc();
!   trace_heap_after_gc(gc_tracer);

    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
    // before any GC notifications are raised.
    g1mm()->update_sizes();
  }

  post_full_gc_dump(gc_timer);

  gc_timer->register_gc_end(os::elapsed_counter());
  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
--- 1480,1552 ----
    } else {
      RebuildRSOutOfRegionClosure rebuild_rs(this);
      heap_region_iterate(&rebuild_rs);
    }

    if (true) { // FIXME
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
    }

  #ifdef TRACESPINNING
    ParallelTaskTerminator::print_termination_counts();
  #endif

    // Discard all rset updates
    JavaThread::dirty_card_queue_set().abandon_logs();
    assert(!G1DeferredRSUpdate
!          || (G1DeferredRSUpdate &&
!              (dirty_card_queue_set().completed_buffers_num() == 0)),
           "Should not be any");

    _young_list->reset_sampled_info();
    // At this point there should be no regions in the
    // entire heap tagged as young.
!   assert(check_young_list_empty(true /* check_heap */),
           "young list should be empty at this point");

    // Update the number of full collections that have been completed.
    increment_old_marking_cycles_completed(false /* concurrent */);

    _hrs.verify_optional();
    verify_region_sets_optional();

!   // Start a new incremental collection set for the next pause
!   assert(g1_policy()->collection_set() == NULL, "must be");
!   g1_policy()->start_incremental_cset_building();

+   // Clear the _cset_fast_test bitmap in anticipation of adding
+   // regions to the incremental collection set for the next
+   // evacuation pause.
+   clear_cset_fast_test();
+
+   init_mutator_alloc_region();
+
+   double end = os::elapsedTime();
+   g1_policy()->record_full_collection_end();
+
+   if (G1Log::fine()) {
+     g1_policy()->print_heap_transition();
+   }
+
    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
    // before any GC notifications are raised.
    g1mm()->update_sizes();
+
+   gc_epilogue(true);
  }

+ if (G1Log::finer()) {
+   g1_policy()->print_detailed_heap_transition();
+ }
+
+ print_heap_after_gc();
+ trace_heap_after_gc(gc_tracer);
+
  post_full_gc_dump(gc_timer);
+ }

  gc_timer->register_gc_end(os::elapsed_counter());
  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
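Together with the removal of g1h_prev_used in the 1336 hunk, this hunk moves the "used before -> used after (capacity)" report out of the caller: instead of print_size_transition() with a locally captured used() value, the policy now prints the transition itself (print_heap_transition() at fine, print_detailed_heap_transition() at finer) after record_full_collection_end(). The policy's bookkeeping is not part of this diff; the following sketch only illustrates the shape such a printer could take, and its field names are assumptions:

#include <cstddef>
#include <cstdio>

// Hypothetical policy-side bookkeeping behind a heap-transition line.
struct HeapTransitionSketch {
  size_t _used_before_bytes;   // assumed: sampled when the collection starts
  size_t _used_after_bytes;    // assumed: sampled when the collection ends
  size_t _capacity_bytes;

  // Prints a "1024K->512K(4096K)"-style transition, as PrintGC users expect.
  void print_heap_transition() const {
    std::printf(" %zuK->%zuK(%zuK)",
                _used_before_bytes / 1024,
                _used_after_bytes / 1024,
                _capacity_bytes / 1024);
  }
};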
*** 2016,2027 ****
    // If this happens then we could end up using a non-optimal
    // compressed oops mode.

    // Since max_byte_size is aligned to the size of a heap region (checked
    // above), we also need to align the perm gen size as it might not be.
!   const size_t total_reserved = max_byte_size +
!                                 align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
    Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");

    char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

    ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
--- 2022,2037 ----
    // If this happens then we could end up using a non-optimal
    // compressed oops mode.

    // Since max_byte_size is aligned to the size of a heap region (checked
    // above), we also need to align the perm gen size as it might not be.
!   size_t total_reserved = 0;
!
!   total_reserved = add_and_check_overflow(total_reserved, max_byte_size);
!   size_t pg_max_size = (size_t) align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
!   total_reserved = add_and_check_overflow(total_reserved, pg_max_size);
!
    Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");

    char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

    ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
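total_reserved is now built up with add_and_check_overflow() so that an oversized heap-plus-perm reservation fails loudly instead of wrapping around silently. The helper's definition is not shown in this hunk; below is a hedged sketch of one plausible shape for an overflow-checked size_t addition, not the actual HotSpot implementation:

#include <cassert>
#include <cstddef>

// Unsigned addition wraps modulo 2^N, so overflow is detectable by the
// result being smaller than either operand.
static size_t add_and_check_overflow_sketch(size_t a, size_t b) {
  size_t sum = a + b;
  assert(sum >= a && "size_t addition overflowed");
  return sum;  // a product build would want a guarantee()-style check
               // rather than assert(), which compiles away in release
}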
*** 3355,3365 ****
    verify(silent, VerifyOption_G1UsePrevMarking);
  }

  void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
!   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
      if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
      VerifyRootsClosure rootsCl(vo);

      assert(Thread::current()->is_VM_thread(),
             "Expected to be executed serially by the VM thread at this point");
--- 3365,3375 ----
    verify(silent, VerifyOption_G1UsePrevMarking);
  }

  void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
!   if (SafepointSynchronize::is_at_safepoint()) {
      if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
      VerifyRootsClosure rootsCl(vo);

      assert(Thread::current()->is_VM_thread(),
             "Expected to be executed serially by the VM thread at this point");
*** 3453,3463 ****
  #endif
        gclog_or_tty->flush();
      }
      guarantee(!failures, "there should not have been any failures");
    } else {
!     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
    }
  }

  class PrintRegionClosure: public HeapRegionClosure {
    outputStream* _st;
--- 3463,3474 ----
  #endif
        gclog_or_tty->flush();
      }
      guarantee(!failures, "there should not have been any failures");
    } else {
!     if (!silent)
!       gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) ");
    }
  }

  class PrintRegionClosure: public HeapRegionClosure {
    outputStream* _st;
*** 3915,3934 ****
      // Preserving the old comment here if that helps the investigation:
      //
      // The elapsed time induced by the start time below deliberately elides
      // the possible verification above.
      double sample_start_time_sec = os::elapsedTime();
-     size_t start_used_bytes = used();

  #if YOUNG_LIST_VERBOSE
      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
      _young_list->print();
      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  #endif // YOUNG_LIST_VERBOSE

!     g1_policy()->record_collection_pause_start(sample_start_time_sec,
!                                                start_used_bytes);

      double scan_wait_start = os::elapsedTime();
      // We have to wait until the CM threads finish scanning the
      // root regions as it's the only way to ensure that all the
      // objects on them have been correctly scanned before we start
--- 3926,3943 ----
      // Preserving the old comment here if that helps the investigation:
      //
      // The elapsed time induced by the start time below deliberately elides
      // the possible verification above.
      double sample_start_time_sec = os::elapsedTime();

  #if YOUNG_LIST_VERBOSE
      gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
      _young_list->print();
      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  #endif // YOUNG_LIST_VERBOSE

!     g1_policy()->record_collection_pause_start(sample_start_time_sec);

      double scan_wait_start = os::elapsedTime();
      // We have to wait until the CM threads finish scanning the
      // root regions as it's the only way to ensure that all the
      // objects on them have been correctly scanned before we start
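With start_used_bytes gone, record_collection_pause_start() takes only the timestamp, so the used()-at-pause-start sample is presumably taken inside the policy, mirroring the full-GC change in the 1480 hunk. G1CollectorPolicy's real fields are not part of this diff; the sketch below is illustration only:

#include <cstddef>

class PolicySketch {
  double _pause_start_sec;
  size_t _used_at_pause_start_bytes;
  size_t _heap_used_bytes;  // stand-in for the heap's used() accounting
 public:
  explicit PolicySketch(size_t used_bytes)
    : _pause_start_sec(0.0),
      _used_at_pause_start_bytes(0),
      _heap_used_bytes(used_bytes) {}
  // After this change the pause-start hook receives only the start time;
  // the used-at-start value is sampled here instead of by the caller.
  void record_collection_pause_start(double start_time_sec) {
    _pause_start_sec = start_time_sec;
    _used_at_pause_start_bytes = _heap_used_bytes;
  }
};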