Abort concurrent mark

--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
... 1016 lines elided ...
1017 1017    _cm->root_regions()->wait_until_scan_finished();
1018 1018  
1019 1019    // Disable discovery and empty the discovered lists
1020 1020    // for the CM ref processor.
1021 1021    _ref_processor_cm->disable_discovery();
1022 1022    _ref_processor_cm->abandon_partial_discovery();
1023 1023    _ref_processor_cm->verify_no_references_recorded();
1024 1024  
1025 1025    // Abandon current iterations of concurrent marking and concurrent
1026 1026    // refinement, if any are in progress.
1027      -  concurrent_mark()->concurrent_cycle_abort();
     1027 +  concurrent_mark()->concurrent_cycle_abort_by_fullgc();
1028 1028  }
1029 1029  
1030 1030  void G1CollectedHeap::prepare_heap_for_full_collection() {
1031 1031    // Make sure we'll choose a new allocation region afterwards.
1032 1032    _allocator->release_mutator_alloc_regions();
1033 1033    _allocator->abandon_gc_alloc_regions();
1034 1034  
1035 1035    // We may have added regions to the current incremental collection
1036 1036    // set between the last GC or pause and now. We need to clear the
1037 1037    // incremental collection set and then start rebuilding it afresh
... 2091 lines elided ...
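Note on the hunk above: the full-GC path now calls concurrent_cycle_abort_by_fullgc() instead of the generic concurrent_cycle_abort(), making the cause of the abort explicit. The matching g1ConcurrentMark.hpp/cpp changes are elided from this page, so the following is only a minimal sketch under the assumption that the pre-existing abort logic is reused; nothing beyond the method name called in the hunk is confirmed by this excerpt.

    // Sketch only, not the actual (elided) g1ConcurrentMark.cpp hunk.
    void G1ConcurrentMark::concurrent_cycle_abort_by_fullgc() {
      // A full collection invalidates any in-progress marking, so tear the
      // cycle down exactly as the old entry point did; the new name merely
      // records why the cycle was abandoned.
      concurrent_cycle_abort();  // assumes the existing abort logic is kept and reused
    }
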
3129 3129          if (_collection_set.optional_region_length() != 0) {
3130 3130            evacuate_optional_collection_set(&per_thread_states);
3131 3131          }
3132 3132          post_evacuate_collection_set(evacuation_info, &rdcqs, &per_thread_states);
3133 3133  
3134 3134          start_new_collection_set();
3135 3135  
3136 3136          _survivor_evac_stats.adjust_desired_plab_sz();
3137 3137          _old_evac_stats.adjust_desired_plab_sz();
3138 3138  
3139      -        if (should_start_conc_mark) {
     3139 +        if (gc_cause() == GCCause::_g1_humongous_allocation && collector_state()->in_initial_mark_gc()) {
     3140 +          // Check whether we still need to start concurrent mark after evacuation.
     3141 +          // Abort concurrent mark if eager reclaim of humongous objects removed the need for it.
     3142 +          if (!policy()->need_to_start_conc_mark("end of GC")) {
     3143 +            concurrent_mark()->concurrent_cycle_abort_by_initial_mark();
     3144 +          }
     3145 +        }
     3146 +
     3147 +        if (should_start_conc_mark && !concurrent_mark()->aborted_by_initial_mark()) {
3140 3148            // We have to do this before we notify the CM threads that
3141 3149            // they can start working to make sure that all the
3142 3150            // appropriate initialization is done on the CM object.
3143 3151            concurrent_mark()->post_initial_mark();
3144 3152            // Note that we don't actually trigger the CM thread at
3145 3153            // this point. We do that later when we're sure that
3146 3154            // the current thread has completed its logging output.
3147 3155          }
3148 3156  
3149 3157          allocate_dummy_regions();
... 1862 lines elided ...
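
Note on the second hunk: at the end of an evacuation pause that was an initial-mark pause caused by a humongous allocation, eager reclaim may already have freed the humongous objects that motivated the marking cycle. If policy()->need_to_start_conc_mark("end of GC") no longer holds, the cycle is aborted via concurrent_cycle_abort_by_initial_mark(), and the should_start_conc_mark branch then skips post_initial_mark(), so the concurrent mark thread is never notified for this cycle. The G1ConcurrentMark side is elided from this page; the sketch below assumes a simple flag, and the field name _aborted_by_initial_mark is hypothetical.

    // Sketch only, not the actual (elided) g1ConcurrentMark.{hpp,cpp} hunks.
    void G1ConcurrentMark::concurrent_cycle_abort_by_initial_mark() {
      // Remember that the initial-mark pause decided not to start marking,
      // so the caller skips post_initial_mark() and the concurrent mark
      // thread is never kicked off for this cycle.
      _aborted_by_initial_mark = true;  // hypothetical field name
    }

    bool G1ConcurrentMark::aborted_by_initial_mark() const {
      return _aborted_by_initial_mark;  // hypothetical field name
    }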