--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 16 16:28:29 2013
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 16 16:28:29 2013
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1313,233 +1313,239 @@
     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
-    GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
-    TraceCollectorStats tcs(g1mm()->full_collection_counters());
-    TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
+    {
+      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
+      TraceCollectorStats tcs(g1mm()->full_collection_counters());
+      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());

-    double start = os::elapsedTime();
-    g1_policy()->record_full_collection_start();
+      double start = os::elapsedTime();
+      g1_policy()->record_full_collection_start();

-    // Note: When we have a more flexible GC logging framework that
-    // allows us to add optional attributes to a GC log record we
-    // could consider timing and reporting how long we wait in the
-    // following two methods.
-    wait_while_free_regions_coming();
-    // If we start the compaction before the CM threads finish
-    // scanning the root regions we might trip them over as we'll
-    // be moving objects / updating references. So let's wait until
-    // they are done. By telling them to abort, they should complete
-    // early.
-    _cm->root_regions()->abort();
-    _cm->root_regions()->wait_until_scan_finished();
-    append_secondary_free_list_if_not_empty_with_lock();
+      // Note: When we have a more flexible GC logging framework that
+      // allows us to add optional attributes to a GC log record we
+      // could consider timing and reporting how long we wait in the
+      // following two methods.
+      wait_while_free_regions_coming();
+      // If we start the compaction before the CM threads finish
+      // scanning the root regions we might trip them over as we'll
+      // be moving objects / updating references. So let's wait until
+      // they are done. By telling them to abort, they should complete
+      // early.
+      _cm->root_regions()->abort();
+      _cm->root_regions()->wait_until_scan_finished();
+      append_secondary_free_list_if_not_empty_with_lock();

-    gc_prologue(true);
-    increment_total_collections(true /* full gc */);
-    increment_old_marking_cycles_started();
+      gc_prologue(true);
+      increment_total_collections(true /* full gc */);
+      increment_old_marking_cycles_started();

-    size_t g1h_prev_used = used();
-    assert(used() == recalculate_used(), "Should be equal");
+      assert(used() == recalculate_used(), "Should be equal");

-    verify_before_gc();
+      verify_before_gc();

-    pre_full_gc_dump(gc_timer);
+      pre_full_gc_dump(gc_timer);

-    COMPILER2_PRESENT(DerivedPointerTable::clear());
+      COMPILER2_PRESENT(DerivedPointerTable::clear());

-    // Disable discovery and empty the discovered lists
-    // for the CM ref processor.
-    ref_processor_cm()->disable_discovery();
-    ref_processor_cm()->abandon_partial_discovery();
-    ref_processor_cm()->verify_no_references_recorded();
+      // Disable discovery and empty the discovered lists
+      // for the CM ref processor.
+      ref_processor_cm()->disable_discovery();
+      ref_processor_cm()->abandon_partial_discovery();
+      ref_processor_cm()->verify_no_references_recorded();

-    // Abandon current iterations of concurrent marking and concurrent
-    // refinement, if any are in progress. We have to do this before
-    // wait_until_scan_finished() below.
-    concurrent_mark()->abort();
+      // Abandon current iterations of concurrent marking and concurrent
+      // refinement, if any are in progress. We have to do this before
+      // wait_until_scan_finished() below.
+      concurrent_mark()->abort();

-    // Make sure we'll choose a new allocation region afterwards.
-    release_mutator_alloc_region();
-    abandon_gc_alloc_regions();
-    g1_rem_set()->cleanupHRRS();
+      // Make sure we'll choose a new allocation region afterwards.
+      release_mutator_alloc_region();
+      abandon_gc_alloc_regions();
+      g1_rem_set()->cleanupHRRS();

-    // We should call this after we retire any currently active alloc
-    // regions so that all the ALLOC / RETIRE events are generated
-    // before the start GC event.
-    _hr_printer.start_gc(true /* full */, (size_t) total_collections());
+      // We should call this after we retire any currently active alloc
+      // regions so that all the ALLOC / RETIRE events are generated
+      // before the start GC event.
+      _hr_printer.start_gc(true /* full */, (size_t) total_collections());

-    // We may have added regions to the current incremental collection
-    // set between the last GC or pause and now. We need to clear the
-    // incremental collection set and then start rebuilding it afresh
-    // after this full GC.
-    abandon_collection_set(g1_policy()->inc_cset_head());
-    g1_policy()->clear_incremental_cset();
-    g1_policy()->stop_incremental_cset_building();
+      // We may have added regions to the current incremental collection
+      // set between the last GC or pause and now. We need to clear the
+      // incremental collection set and then start rebuilding it afresh
+      // after this full GC.
+      abandon_collection_set(g1_policy()->inc_cset_head());
+      g1_policy()->clear_incremental_cset();
+      g1_policy()->stop_incremental_cset_building();

-    tear_down_region_sets(false /* free_list_only */);
-    g1_policy()->set_gcs_are_young(true);
+      tear_down_region_sets(false /* free_list_only */);
+      g1_policy()->set_gcs_are_young(true);

-    // See the comments in g1CollectedHeap.hpp and
-    // G1CollectedHeap::ref_processing_init() about
-    // how reference processing currently works in G1.
+      // See the comments in g1CollectedHeap.hpp and
+      // G1CollectedHeap::ref_processing_init() about
+      // how reference processing currently works in G1.

-    // Temporarily make discovery by the STW ref processor single threaded (non-MT).
-    ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
+      // Temporarily make discovery by the STW ref processor single threaded (non-MT).
+      ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);

-    // Temporarily clear the STW ref processor's _is_alive_non_header field.
-    ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
+      // Temporarily clear the STW ref processor's _is_alive_non_header field.
+      ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);

-    ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
-    ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
+      ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+      ref_processor_stw()->setup_policy(do_clear_all_soft_refs);

-    // Do collection work
-    {
-      HandleMark hm; // Discard invalid handles created during gc
-      G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
-    }
+      // Do collection work
+      {
+        HandleMark hm; // Discard invalid handles created during gc
+        G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
+      }

-    assert(free_regions() == 0, "we should not have added any free regions");
-    rebuild_region_sets(false /* free_list_only */);
+      assert(free_regions() == 0, "we should not have added any free regions");
+      rebuild_region_sets(false /* free_list_only */);

-    // Enqueue any discovered reference objects that have
-    // not been removed from the discovered lists.
-    ref_processor_stw()->enqueue_discovered_references();
+      // Enqueue any discovered reference objects that have
+      // not been removed from the discovered lists.
+      ref_processor_stw()->enqueue_discovered_references();

-    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+      COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

-    MemoryService::track_memory_usage();
+      MemoryService::track_memory_usage();

-    verify_after_gc();
+      verify_after_gc();

-    assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
-    ref_processor_stw()->verify_no_references_recorded();
+      assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
+      ref_processor_stw()->verify_no_references_recorded();

-    // Note: since we've just done a full GC, concurrent
-    // marking is no longer active. Therefore we need not
-    // re-enable reference discovery for the CM ref processor.
-    // That will be done at the start of the next marking cycle.
-    assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
-    ref_processor_cm()->verify_no_references_recorded();
+      // Note: since we've just done a full GC, concurrent
+      // marking is no longer active. Therefore we need not
+      // re-enable reference discovery for the CM ref processor.
+      // That will be done at the start of the next marking cycle.
+      assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
+      ref_processor_cm()->verify_no_references_recorded();

-    reset_gc_time_stamp();
-    // Since everything potentially moved, we will clear all remembered
-    // sets, and clear all cards. Later we will rebuild remembered
-    // sets. We will also reset the GC time stamps of the regions.
-    clear_rsets_post_compaction();
-    check_gc_time_stamps();
+      reset_gc_time_stamp();
+      // Since everything potentially moved, we will clear all remembered
+      // sets, and clear all cards. Later we will rebuild remembered
+      // sets. We will also reset the GC time stamps of the regions.
+      clear_rsets_post_compaction();
+      check_gc_time_stamps();

-    // Resize the heap if necessary.
-    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
+      // Resize the heap if necessary.
+      resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);

-    if (_hr_printer.is_active()) {
-      // We should do this after we potentially resize the heap so
-      // that all the COMMIT / UNCOMMIT events are generated before
-      // the end GC event.
+      if (_hr_printer.is_active()) {
+        // We should do this after we potentially resize the heap so
+        // that all the COMMIT / UNCOMMIT events are generated before
+        // the end GC event.
-      print_hrs_post_compaction();
-      _hr_printer.end_gc(true /* full */, (size_t) total_collections());
-    }
+        print_hrs_post_compaction();
+        _hr_printer.end_gc(true /* full */, (size_t) total_collections());
+      }

-    if (_cg1r->use_cache()) {
-      _cg1r->clear_and_record_card_counts();
-      _cg1r->clear_hot_cache();
-    }
+      if (_cg1r->use_cache()) {
+        _cg1r->clear_and_record_card_counts();
+        _cg1r->clear_hot_cache();
+      }

-    // Rebuild remembered sets of all regions.
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
-      uint n_workers =
-        AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
-                                                workers()->active_workers(),
-                                                Threads::number_of_non_daemon_threads());
-      assert(UseDynamicNumberOfGCThreads ||
-             n_workers == workers()->total_workers(),
-             "If not dynamic should be using all the workers");
-      workers()->set_active_workers(n_workers);
-      // Set parallel threads in the heap (_n_par_threads) only
-      // before a parallel phase and always reset it to 0 after
-      // the phase so that the number of parallel threads does
-      // no get carried forward to a serial phase where there
-      // may be code that is "possibly_parallel".
-      set_par_threads(n_workers);
+      // Rebuild remembered sets of all regions.
+      if (G1CollectedHeap::use_parallel_gc_threads()) {
+        uint n_workers =
+          AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
+                                                  workers()->active_workers(),
+                                                  Threads::number_of_non_daemon_threads());
+        assert(UseDynamicNumberOfGCThreads ||
+               n_workers == workers()->total_workers(),
+               "If not dynamic should be using all the workers");
+        workers()->set_active_workers(n_workers);
+        // Set parallel threads in the heap (_n_par_threads) only
+        // before a parallel phase and always reset it to 0 after
+        // the phase so that the number of parallel threads does
+        // not get carried forward to a serial phase where there
+        // may be code that is "possibly_parallel".
+        set_par_threads(n_workers);

-      ParRebuildRSTask rebuild_rs_task(this);
-      assert(check_heap_region_claim_values(
-             HeapRegion::InitialClaimValue), "sanity check");
-      assert(UseDynamicNumberOfGCThreads ||
-             workers()->active_workers() == workers()->total_workers(),
-             "Unless dynamic should use total workers");
-      // Use the most recent number of active workers
-      assert(workers()->active_workers() > 0,
-             "Active workers not properly set");
-      set_par_threads(workers()->active_workers());
-      workers()->run_task(&rebuild_rs_task);
-      set_par_threads(0);
-      assert(check_heap_region_claim_values(
-             HeapRegion::RebuildRSClaimValue), "sanity check");
-      reset_heap_region_claim_values();
-    } else {
-      RebuildRSOutOfRegionClosure rebuild_rs(this);
-      heap_region_iterate(&rebuild_rs);
-    }
+        ParRebuildRSTask rebuild_rs_task(this);
+        assert(check_heap_region_claim_values(
+               HeapRegion::InitialClaimValue), "sanity check");
+        assert(UseDynamicNumberOfGCThreads ||
+               workers()->active_workers() == workers()->total_workers(),
+               "Unless dynamic should use total workers");
+        // Use the most recent number of active workers
+        assert(workers()->active_workers() > 0,
+               "Active workers not properly set");
+        set_par_threads(workers()->active_workers());
+        workers()->run_task(&rebuild_rs_task);
+        set_par_threads(0);
+        assert(check_heap_region_claim_values(
+               HeapRegion::RebuildRSClaimValue), "sanity check");
+        reset_heap_region_claim_values();
+      } else {
+        RebuildRSOutOfRegionClosure rebuild_rs(this);
+        heap_region_iterate(&rebuild_rs);
+      }

-    if (G1Log::fine()) {
-      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
-    }
+      if (true) { // FIXME
+        // Ask the permanent generation to adjust size for full collections
+        perm()->compute_new_size();
+      }

-    if (true) { // FIXME
-      // Ask the permanent generation to adjust size for full collections
-      perm()->compute_new_size();
-    }
+#ifdef TRACESPINNING
+      ParallelTaskTerminator::print_termination_counts();
+#endif

-    // Start a new incremental collection set for the next pause
-    assert(g1_policy()->collection_set() == NULL, "must be");
-    g1_policy()->start_incremental_cset_building();
+      // Discard all rset updates
+      JavaThread::dirty_card_queue_set().abandon_logs();
+      assert(!G1DeferredRSUpdate
+             || (G1DeferredRSUpdate &&
+                 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");

-    // Clear the _cset_fast_test bitmap in anticipation of adding
-    // regions to the incremental collection set for the next
-    // evacuation pause.
-    clear_cset_fast_test();
+      _young_list->reset_sampled_info();
+      // At this point there should be no regions in the
+      // entire heap tagged as young.
+      assert(check_young_list_empty(true /* check_heap */),
+             "young list should be empty at this point");

-    init_mutator_alloc_region();
+      // Update the number of full collections that have been completed.
+      increment_old_marking_cycles_completed(false /* concurrent */);

-    double end = os::elapsedTime();
-    g1_policy()->record_full_collection_end();
+      _hrs.verify_optional();
+      verify_region_sets_optional();

-#ifdef TRACESPINNING
-    ParallelTaskTerminator::print_termination_counts();
-#endif
+      // Start a new incremental collection set for the next pause
+      assert(g1_policy()->collection_set() == NULL, "must be");
+      g1_policy()->start_incremental_cset_building();

-    gc_epilogue(true);
+      // Clear the _cset_fast_test bitmap in anticipation of adding
+      // regions to the incremental collection set for the next
+      // evacuation pause.
+      clear_cset_fast_test();

-    // Discard all rset updates
-    JavaThread::dirty_card_queue_set().abandon_logs();
-    assert(!G1DeferredRSUpdate
-           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
+      init_mutator_alloc_region();

-    _young_list->reset_sampled_info();
-    // At this point there should be no regions in the
-    // entire heap tagged as young.
-    assert( check_young_list_empty(true /* check_heap */),
-            "young list should be empty at this point");
+      double end = os::elapsedTime();
+      g1_policy()->record_full_collection_end();

-    // Update the number of full collections that have been completed.
-    increment_old_marking_cycles_completed(false /* concurrent */);
+      if (G1Log::fine()) {
+        g1_policy()->print_heap_transition();
+      }

-    _hrs.verify_optional();
-    verify_region_sets_optional();
+      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
+      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
+      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
+      // before any GC notifications are raised.
+      g1mm()->update_sizes();
+      gc_epilogue(true);
+    }
+
+    if (G1Log::finer()) {
+      g1_policy()->print_detailed_heap_transition();
+    }
+
     print_heap_after_gc();
     trace_heap_after_gc(gc_tracer);

-    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
-    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
-    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
-    // before any GC notifications are raised.
-    g1mm()->update_sizes();
+    post_full_gc_dump(gc_timer);
   }

-  post_full_gc_dump(gc_timer);
-
   gc_timer->register_gc_end(os::elapsed_counter());
   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
@@ -2018,8 +2024,12 @@

   // Since max_byte_size is aligned to the size of a heap region (checked
   // above), we also need to align the perm gen size as it might not be.
-  const size_t total_reserved = max_byte_size +
-                                align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
+  size_t total_reserved = 0;
+
+  total_reserved = add_and_check_overflow(total_reserved, max_byte_size);
+  size_t pg_max_size = (size_t) align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
+  total_reserved = add_and_check_overflow(total_reserved, pg_max_size);
+
   Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");

   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
@@ -3357,12 +3367,12 @@
 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
-  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
+  if (SafepointSynchronize::is_at_safepoint()) {
     if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
     VerifyRootsClosure rootsCl(vo);
     assert(Thread::current()->is_VM_thread(),
-      "Expected to be executed serially by the VM thread at this point");
+           "Expected to be executed serially by the VM thread at this point");

     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
@@ -3455,7 +3465,8 @@
     }
     guarantee(!failures, "there should not have been any failures");
   } else {
-    if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
+    if (!silent)
+      gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) ");
   }
 }
@@ -3917,7 +3928,6 @@
   // The elapsed time induced by the start time below deliberately elides
   // the possible verification above.
   double sample_start_time_sec = os::elapsedTime();
-  size_t start_used_bytes = used();

 #if YOUNG_LIST_VERBOSE
   gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
@@ -3925,8 +3935,7 @@
   _young_list->print();
   g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE

-  g1_policy()->record_collection_pause_start(sample_start_time_sec,
-                                             start_used_bytes);
+  g1_policy()->record_collection_pause_start(sample_start_time_sec);

   double scan_wait_start = os::elapsedTime();
   // We have to wait until the CM threads finish scanning the
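
A note on the @@ -2018,8 +2024,12 @@ hunk: the old code computed total_reserved with a single unchecked addition, which can silently wrap around on size_t. The new code funnels each term through add_and_check_overflow(). That helper's definition is not part of this file's diff; the sketch below only illustrates the behavior the call sites appear to rely on, with the name reused and the failure action (a plain assert) assumed rather than taken from HotSpot.

    #include <cassert>
    #include <cstddef>

    // Sketch, not the HotSpot implementation: add two size_t values and fail
    // loudly if the sum wrapped around. Unsigned addition overflowed exactly
    // when the result is smaller than either operand.
    static size_t add_and_check_overflow(size_t a, size_t b) {
      size_t result = a + b;
      assert(result >= a && "size_t overflow while sizing reserved heap space");
      return result;
    }

With a helper of this shape, total_reserved = add_and_check_overflow(total_reserved, max_byte_size) either yields the exact sum or stops execution, instead of reserving a wrapped-around (far too small) heap.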
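The bulk of the @@ -1313 hunk is re-indentation with one purpose: the comment kept above g1mm()->update_sizes() requires the G1 memory pools to be updated while the TraceMemoryManagerStats object is still in scope, i.e. before its destructor raises GC notifications, while the G1Log::finer() heap-transition logging should run after that destructor. Wrapping the RAII stats objects in an explicit block encodes both orderings. A freestanding illustration of the destructor-ordering idea (the type and messages are stand-ins, not HotSpot code):

    #include <cstdio>

    // Stand-in for TraceMemoryManagerStats: the destructor publishes a GC
    // notification built from whatever sizes were recorded while it lived.
    struct ScopedGCStats {
      ~ScopedGCStats() { std::puts("~ScopedGCStats: raise GC notification"); }
    };

    void full_gc_shaped_like_the_patch() {
      {
        ScopedGCStats tms;                      // constructed at scope entry
        std::puts("collection work");
        std::puts("update memory pool sizes");  // must precede the destructor
      }                                         // ~ScopedGCStats() runs here
      std::puts("detailed heap-transition logging");  // after notifications
    }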
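The remembered-set rebuild shows the worker-count bracketing its comment describes: set_par_threads(n_workers) immediately before workers()->run_task(...), set_par_threads(0) immediately after, so serial code that is "possibly_parallel" never observes a stale count. HotSpot does this with explicit calls as in the hunk; purely as an illustration, the same discipline can be written as a scope guard (hypothetical code, not part of the patch):

    // Hypothetical guard: set a parallel-thread count on entry and always
    // restore it to zero on exit, mirroring the explicit
    // set_par_threads(n) / set_par_threads(0) pair in the hunk.
    class ParThreadsScope {
      unsigned& _count;
     public:
      ParThreadsScope(unsigned& count, unsigned n) : _count(count) { _count = n; }
      ~ParThreadsScope() { _count = 0; }
    };

    void rebuild_remembered_sets(unsigned& n_par_threads, unsigned n_workers) {
      ParThreadsScope guard(n_par_threads, n_workers);
      // ... run the parallel rebuild task here; the reset to zero cannot be
      // skipped, even on an early return.
    }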