
src/share/vm/gc/g1/g1CollectedHeap.cpp

Old version (before the change):




1490       // If we start the compaction before the CM threads finish
1491       // scanning the root regions we might trip them over as we'll
1492       // be moving objects / updating references. So let's wait until
1493       // they are done. By telling them to abort, they should complete
1494       // early.
1495       _cm->root_regions()->abort();
1496       _cm->root_regions()->wait_until_scan_finished();
1497       append_secondary_free_list_if_not_empty_with_lock();
1498 
1499       gc_prologue(true);
1500       increment_total_collections(true /* full gc */);
1501       increment_old_marking_cycles_started();
1502 
1503       assert(used() == recalculate_used(), "Should be equal");
1504 
1505       verify_before_gc();
1506 
1507       check_bitmaps("Full GC Start");
1508       pre_full_gc_dump(gc_timer);
1509 
1510       COMPILER2_PRESENT(DerivedPointerTable::clear());


1511 
1512       // Disable discovery and empty the discovered lists
1513       // for the CM ref processor.
1514       ref_processor_cm()->disable_discovery();
1515       ref_processor_cm()->abandon_partial_discovery();
1516       ref_processor_cm()->verify_no_references_recorded();
1517 
1518       // Abandon current iterations of concurrent marking and concurrent
1519       // refinement, if any are in progress. We have to do this before
1520       // wait_until_scan_finished() below.
1521       concurrent_mark()->abort();
1522 
1523       // Make sure we'll choose a new allocation region afterwards.
1524       _allocator->release_mutator_alloc_region();
1525       _allocator->abandon_gc_alloc_regions();
1526       g1_rem_set()->cleanupHRRS();
1527 
1528       // We should call this after we retire any currently active alloc
1529       // regions so that all the ALLOC / RETIRE events are generated
1530       // before the start GC event.


1550 
1551       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1552       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1553 
1554       ref_processor_stw()->enable_discovery();
1555       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1556 
1557       // Do collection work
1558       {
1559         HandleMark hm;  // Discard invalid handles created during gc
1560         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1561       }
1562 
1563       assert(num_free_regions() == 0, "we should not have added any free regions");
1564       rebuild_region_sets(false /* free_list_only */);
1565 
1566       // Enqueue any discovered reference objects that have
1567       // not been removed from the discovered lists.
1568       ref_processor_stw()->enqueue_discovered_references();
1569 
1570       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());


1571 
1572       MemoryService::track_memory_usage();
1573 
1574       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1575       ref_processor_stw()->verify_no_references_recorded();
1576 
1577       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1578       ClassLoaderDataGraph::purge();
1579       MetaspaceAux::verify_metrics();
1580 
1581       // Note: since we've just done a full GC, concurrent
1582       // marking is no longer active. Therefore we need not
1583       // re-enable reference discovery for the CM ref processor.
1584       // That will be done at the start of the next marking cycle.
1585       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1586       ref_processor_cm()->verify_no_references_recorded();
1587 
1588       reset_gc_time_stamp();
1589       // Since everything potentially moved, we will clear all remembered
1590       // sets, and clear all cards.  Later we will rebuild remembered


3613   ensure_parsability(true);
3614 
3615   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3616       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3617     g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3618   }
3619 }
3620 
3621 void G1CollectedHeap::gc_epilogue(bool full) {
3622 
3623   if (G1SummarizeRSetStats &&
3624       (G1SummarizeRSetStatsPeriod > 0) &&
3625       // we are at the end of the GC. Total collections has already been increased.
3626       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3627     g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3628   }
3629 
3630   // FIXME: what is this about?
3631   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3632   // is set.
3633   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3634                         "derived pointer present"));

3635   // always_do_update_barrier = true;
3636 
3637   resize_all_tlabs();
3638   allocation_context_stats().update(full);
3639 
3640   // We have just completed a GC. Update the soft reference
3641   // policy with the new heap occupancy
3642   Universe::update_heap_info_at_gc();
3643 }
3644 
3645 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3646                                                uint gc_count_before,
3647                                                bool* succeeded,
3648                                                GCCause::Cause gc_cause) {
3649   assert_heap_not_locked_and_not_at_safepoint();
3650   g1_policy()->record_stop_world_start();
3651   VM_G1IncCollectionPause op(gc_count_before,
3652                              word_size,
3653                              false, /* should_initiate_conc_mark */
3654                              g1_policy()->max_pause_time_ms(),


4020       append_secondary_free_list_if_not_empty_with_lock();
4021     }
4022 
4023     assert(check_young_list_well_formed(), "young list should be well formed");
4024 
4025     // Don't dynamically change the number of GC threads this early.  A value of
4026     // 0 is used to indicate serial work.  When parallel work is done,
4027     // it will be set.
4028 
4029     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
4030       IsGCActiveMark x;
4031 
4032       gc_prologue(false);
4033       increment_total_collections(false /* full gc */);
4034       increment_gc_time_stamp();
4035 
4036       verify_before_gc();
4037 
4038       check_bitmaps("GC Start");
4039 
4040       COMPILER2_PRESENT(DerivedPointerTable::clear());


4041 
4042       // Please see comment in g1CollectedHeap.hpp and
4043       // G1CollectedHeap::ref_processing_init() to see how
4044       // reference processing currently works in G1.
4045 
4046       // Enable discovery in the STW reference processor
4047       ref_processor_stw()->enable_discovery();
4048 
4049       {
4050         // We want to temporarily turn off discovery by the
4051         // CM ref processor, if necessary, and turn it back
4052         // on again later if we do. Using a scoped
4053         // NoRefDiscovery object will do this.
4054         NoRefDiscovery no_cm_discovery(ref_processor_cm());
4055 
4056         // Forget the current alloc region (we might even choose it to be part
4057         // of the collection set!).
4058         _allocator->release_mutator_alloc_region();
4059 
4060         // We should call this after we retire the mutator alloc


5649 
5650   if (evacuation_failed()) {
5651     remove_self_forwarding_pointers();
5652 
5653     // Reset the G1EvacuationFailureALot counters and flags
5654     // Note: the values are reset only when an actual
5655     // evacuation failure occurs.
5656     NOT_PRODUCT(reset_evacuation_should_fail();)
5657   }
5658 
5659   // Enqueue any references remaining on the STW
5660   // reference processor's discovered lists. We need to do
5661   // this after the card table is cleaned (and verified) as
5662   // the act of enqueueing entries on to the pending list
5663   // will log these updates (and dirty their associated
5664   // cards). We need these updates logged to update any
5665   // RSets.
5666   enqueue_discovered_references(per_thread_states);
5667 
5668   redirty_logged_cards();
5669   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());


5670 }
5671 
5672 void G1CollectedHeap::record_obj_copy_mem_stats() {
5673   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
5674                                                create_g1_evac_summary(&_old_evac_stats));
5675 }
5676 
5677 void G1CollectedHeap::free_region(HeapRegion* hr,
5678                                   FreeRegionList* free_list,
5679                                   bool par,
5680                                   bool locked) {
5681   assert(!hr->is_free(), "the region should not be free");
5682   assert(!hr->is_empty(), "the region should not be empty");
5683   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5684   assert(free_list != NULL, "pre-condition");
5685 
5686   if (G1VerifyBitmaps) {
5687     MemRegion mr(hr->bottom(), hr->end());
5688     concurrent_mark()->clearRangePrevBitmap(mr);
5689   }


New version (after the change: COMPILER2_PRESENT replaced with explicit #if defined(COMPILER2) || INCLUDE_JVMCI guards):

1490       // If we start the compaction before the CM threads finish
1491       // scanning the root regions we might trip them over as we'll
1492       // be moving objects / updating references. So let's wait until
1493       // they are done. By telling them to abort, they should complete
1494       // early.
1495       _cm->root_regions()->abort();
1496       _cm->root_regions()->wait_until_scan_finished();
1497       append_secondary_free_list_if_not_empty_with_lock();
1498 
1499       gc_prologue(true);
1500       increment_total_collections(true /* full gc */);
1501       increment_old_marking_cycles_started();
1502 
1503       assert(used() == recalculate_used(), "Should be equal");
1504 
1505       verify_before_gc();
1506 
1507       check_bitmaps("Full GC Start");
1508       pre_full_gc_dump(gc_timer);
1509 
1510 #if defined(COMPILER2) || INCLUDE_JVMCI
1511       DerivedPointerTable::clear();
1512 #endif
1513 
1514       // Disable discovery and empty the discovered lists
1515       // for the CM ref processor.
1516       ref_processor_cm()->disable_discovery();
1517       ref_processor_cm()->abandon_partial_discovery();
1518       ref_processor_cm()->verify_no_references_recorded();
1519 
1520       // Abandon current iterations of concurrent marking and concurrent
1521       // refinement, if any are in progress. We have to do this before
1522       // wait_until_scan_finished() below.
1523       concurrent_mark()->abort();
1524 
1525       // Make sure we'll choose a new allocation region afterwards.
1526       _allocator->release_mutator_alloc_region();
1527       _allocator->abandon_gc_alloc_regions();
1528       g1_rem_set()->cleanupHRRS();
1529 
1530       // We should call this after we retire any currently active alloc
1531       // regions so that all the ALLOC / RETIRE events are generated
1532       // before the start GC event.


1552 
1553       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1554       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1555 
1556       ref_processor_stw()->enable_discovery();
1557       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1558 
1559       // Do collection work
1560       {
1561         HandleMark hm;  // Discard invalid handles created during gc
1562         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1563       }
1564 
1565       assert(num_free_regions() == 0, "we should not have added any free regions");
1566       rebuild_region_sets(false /* free_list_only */);
1567 
1568       // Enqueue any discovered reference objects that have
1569       // not been removed from the discovered lists.
1570       ref_processor_stw()->enqueue_discovered_references();
1571 
1572 #if defined(COMPILER2) || INCLUDE_JVMCI
1573       DerivedPointerTable::update_pointers();
1574 #endif
1575 
1576       MemoryService::track_memory_usage();
1577 
1578       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1579       ref_processor_stw()->verify_no_references_recorded();
1580 
1581       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1582       ClassLoaderDataGraph::purge();
1583       MetaspaceAux::verify_metrics();
1584 
1585       // Note: since we've just done a full GC, concurrent
1586       // marking is no longer active. Therefore we need not
1587       // re-enable reference discovery for the CM ref processor.
1588       // That will be done at the start of the next marking cycle.
1589       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1590       ref_processor_cm()->verify_no_references_recorded();
1591 
1592       reset_gc_time_stamp();
1593       // Since everything potentially moved, we will clear all remembered
1594       // sets, and clear all cards.  Later we will rebuild remembered


3617   ensure_parsability(true);
3618 
3619   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3620       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3621     g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3622   }
3623 }
3624 
3625 void G1CollectedHeap::gc_epilogue(bool full) {
3626 
3627   if (G1SummarizeRSetStats &&
3628       (G1SummarizeRSetStatsPeriod > 0) &&
3629       // we are at the end of the GC. Total collections has already been increased.
3630       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3631     g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3632   }
3633 
3634   // FIXME: what is this about?
3635   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3636   // is set.
3637 #if defined(COMPILER2) || INCLUDE_JVMCI
3638   assert(DerivedPointerTable::is_empty(), "derived pointer present");
3639 #endif
3640   // always_do_update_barrier = true;
3641 
3642   resize_all_tlabs();
3643   allocation_context_stats().update(full);
3644 
3645   // We have just completed a GC. Update the soft reference
3646   // policy with the new heap occupancy
3647   Universe::update_heap_info_at_gc();
3648 }
3649 
3650 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3651                                                uint gc_count_before,
3652                                                bool* succeeded,
3653                                                GCCause::Cause gc_cause) {
3654   assert_heap_not_locked_and_not_at_safepoint();
3655   g1_policy()->record_stop_world_start();
3656   VM_G1IncCollectionPause op(gc_count_before,
3657                              word_size,
3658                              false, /* should_initiate_conc_mark */
3659                              g1_policy()->max_pause_time_ms(),
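In the gc_prologue()/gc_epilogue() pair above, the periodic remembered-set summary is keyed to the same collection on both ends: total_collections() is incremented between the two calls (increment_total_collections() runs right after gc_prologue()), so the epilogue subtracts one before taking the modulus. A sketch of the shared test, assuming a hypothetical helper name that is not in the file:

// Sketch only; should_print_rs_summary is a hypothetical name.
static bool should_print_rs_summary(unsigned int collections_for_this_gc) {
  return G1SummarizeRSetStats &&
         G1SummarizeRSetStatsPeriod > 0 &&
         (collections_for_this_gc % G1SummarizeRSetStatsPeriod == 0);
}
// gc_prologue: should_print_rs_summary(total_collections());
//              (counter not yet incremented for the current GC)
// gc_epilogue: should_print_rs_summary(total_collections() - 1);
//              (counter already incremented, so subtract one to match)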


4025       append_secondary_free_list_if_not_empty_with_lock();
4026     }
4027 
4028     assert(check_young_list_well_formed(), "young list should be well formed");
4029 
4030     // Don't dynamically change the number of GC threads this early.  A value of
4031     // 0 is used to indicate serial work.  When parallel work is done,
4032     // it will be set.
4033 
4034     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
4035       IsGCActiveMark x;
4036 
4037       gc_prologue(false);
4038       increment_total_collections(false /* full gc */);
4039       increment_gc_time_stamp();
4040 
4041       verify_before_gc();
4042 
4043       check_bitmaps("GC Start");
4044 
4045 #if defined(COMPILER2) || INCLUDE_JVMCI
4046       DerivedPointerTable::clear();
4047 #endif
4048 
4049       // Please see comment in g1CollectedHeap.hpp and
4050       // G1CollectedHeap::ref_processing_init() to see how
4051       // reference processing currently works in G1.
4052 
4053       // Enable discovery in the STW reference processor
4054       ref_processor_stw()->enable_discovery();
4055 
4056       {
4057         // We want to temporarily turn off discovery by the
4058         // CM ref processor, if necessary, and turn it back
4059         // on again later if we do. Using a scoped
4060         // NoRefDiscovery object will do this.
4061         NoRefDiscovery no_cm_discovery(ref_processor_cm());
4062 
4063         // Forget the current alloc region (we might even choose it to be part
4064         // of the collection set!).
4065         _allocator->release_mutator_alloc_region();
4066 
4067         // We should call this after we retire the mutator alloc
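NoRefDiscovery, used at line 4061 above, is the scoped helper the comment refers to: it records whether the CM ref processor was discovering references, disables discovery for the duration of the scope if it was, and re-enables it in the destructor. Roughly, as a sketch of the idiom rather than a verbatim copy of the actual class:

class NoRefDiscovery : public StackObj {
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;   // remember the state we found
public:
  NoRefDiscovery(ReferenceProcessor* rp)
    : _rp(rp), _was_discovering_refs(rp->discovery_enabled()) {
    if (_was_discovering_refs) {
      _rp->disable_discovery();   // turn discovery off for this scope
    }
  }
  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery();    // restore it on the way out
    }
  }
};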


5656 
5657   if (evacuation_failed()) {
5658     remove_self_forwarding_pointers();
5659 
5660     // Reset the G1EvacuationFailureALot counters and flags
5661     // Note: the values are reset only when an actual
5662     // evacuation failure occurs.
5663     NOT_PRODUCT(reset_evacuation_should_fail();)
5664   }
5665 
5666   // Enqueue any references remaining on the STW
5667   // reference processor's discovered lists. We need to do
5668   // this after the card table is cleaned (and verified) as
5669   // the act of enqueueing entries on to the pending list
5670   // will log these updates (and dirty their associated
5671   // cards). We need these updates logged to update any
5672   // RSets.
5673   enqueue_discovered_references(per_thread_states);
5674 
5675   redirty_logged_cards();
5676 #if defined(COMPILER2) || INCLUDE_JVMCI
5677   DerivedPointerTable::update_pointers();
5678 #endif
5679 }
5680 
5681 void G1CollectedHeap::record_obj_copy_mem_stats() {
5682   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
5683                                                create_g1_evac_summary(&_old_evac_stats));
5684 }
5685 
5686 void G1CollectedHeap::free_region(HeapRegion* hr,
5687                                   FreeRegionList* free_list,
5688                                   bool par,
5689                                   bool locked) {
5690   assert(!hr->is_free(), "the region should not be free");
5691   assert(!hr->is_empty(), "the region should not be empty");
5692   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5693   assert(free_list != NULL, "pre-condition");
5694 
5695   if (G1VerifyBitmaps) {
5696     MemRegion mr(hr->bottom(), hr->end());
5697     concurrent_mark()->clearRangePrevBitmap(mr);
5698   }

