
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

// ...
           "Maximum number of marking threads exceeded");

    G1CMRootRegionScanTask task(this);
    log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
                        task.name(), _num_concurrent_workers, root_regions()->num_root_regions());
    _concurrent_workers->run_task(&task, _num_concurrent_workers);

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
  // Record start, but take no time
  TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::CycleStart, _g1h->gc_cause());
}

void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
  // Record end, but take no time
  TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::CycleEnd, _g1h->gc_cause());
}
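// The two TraceConcMemoryManagerStats declarations above lean on C++
// scoped-object (RAII) semantics: constructed as the last statement of the
// function, the object is destroyed immediately afterwards, so the begin and
// end notifications fire back to back and the recorded event carries
// essentially no duration, which is what "Record start, but take no time"
// means. Below is a minimal sketch of the idiom only; ScopedCycleEvent and
// its output are hypothetical stand-ins, not the real class.

#include <chrono>
#include <cstdio>

class ScopedCycleEvent {
  const char* _stage;
  std::chrono::steady_clock::time_point _begin;
public:
  explicit ScopedCycleEvent(const char* stage)
    : _stage(stage), _begin(std::chrono::steady_clock::now()) {}
  ~ScopedCycleEvent() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - _begin).count();
    std::printf("%s: %lld us\n", _stage, (long long)us); // ~0 when declared last
  }
};

void cycle_start_like() {
  // ... real work happens before the tracer is created ...
  ScopedCycleEvent tms("CycleStart"); // ctor and dtor run back to back
}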
 952 
 953 void G1ConcurrentMark::mark_from_roots() {
 954   _restart_for_overflow = false;
 955 
 956   _num_concurrent_workers = calc_active_marking_workers();
 957 
 958   uint active_workers = MAX2(1U, _num_concurrent_workers);
 959 
 960   // Setting active workers is not guaranteed since fewer
 961   // worker threads may currently exist and more may not be
 962   // available.
 963   active_workers = _concurrent_workers->update_active_workers(active_workers);
 964   log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());
 965 
 966   // Parallel task terminator is set in "set_concurrency_and_phase()"
 967   set_concurrency_and_phase(active_workers, true /* concurrent */);
 968 
 969   G1CMConcurrentMarkingTask marking_task(this);
 970   _concurrent_workers->run_task(&marking_task);
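// The worker-count negotiation above is a request/grant contract: the caller
// computes a desired count, clamps it to at least one, and
// update_active_workers() returns the number the pool can actually field,
// which may be lower. A small sketch of that contract; WorkerPool and its
// fields are hypothetical stand-ins, not the real work-gang API.

#include <algorithm>
#include <cstdint>

struct WorkerPool {
  uint32_t _total;   // threads that exist
  uint32_t _active;  // threads that will participate in the next task

  // Grant at most the number of threads that exist; never fewer than one.
  uint32_t update_active_workers(uint32_t requested) {
    _active = std::min<uint32_t>(std::max<uint32_t>(requested, 1), _total);
    return _active;
  }
};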
// ...
class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
public:
  G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
    return false;
  }
};
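// G1UpdateRemSetTrackingAfterRebuild follows the closure/iterate idiom used
// throughout HotSpot: heap_region_iterate() visits every region and stops
// early only if the closure returns true, which is why do_heap_region()
// returns false to mean "keep going". A self-contained sketch of the idiom
// with hypothetical types (Region, RegionClosure, Heap):

#include <vector>

struct Region { int index; };

struct RegionClosure {
  virtual ~RegionClosure() = default;
  virtual bool do_region(Region* r) = 0; // return true to stop the walk
};

struct Heap {
  std::vector<Region> regions;
  void region_iterate(RegionClosure* cl) {
    for (Region& r : regions) {
      if (cl->do_region(&r)) {
        return; // closure requested early termination
      }
    }
  }
};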
void G1ConcurrentMark::remark() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we should not continue. However we might
  // have ended up here as the Remark VM operation has been scheduled already.
  if (has_aborted()) {
    return;
  }

  TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::Remark, _g1h->gc_cause());

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_remark_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");

  {
    GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
    finalize_marking();
  }

  double mark_work_end = os::elapsedTime();

  bool const mark_finished = !has_overflown();
  if (mark_finished) {
    weak_refs_work(false /* clear_all_soft_refs */);

    SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
    // We're done with marking.
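// The excerpt cuts remark() off here. The visible pieces pair with
// _restart_for_overflow in mark_from_roots(): when the global mark stack
// overflows, marking is not finished and the concurrent phase is re-run.
// A hypothetical driver loop sketching that handshake (this is not the
// actual G1 control-thread code):

struct MarkingLike {
  bool _restart_for_overflow = false;
  void mark_from_roots() { /* concurrent marking; sets the flag on overflow */ }
  void remark()          { /* STW pause; leaves the flag set if overflown */ }
};

void run_marking_until_complete(MarkingLike& cm) {
  do {
    cm.mark_from_roots(); // resets and possibly re-sets _restart_for_overflow
    cm.remark();
  } while (cm._restart_for_overflow);
}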
// ...
void G1ConcurrentMark::compute_new_sizes() {
  MetaspaceGC::compute_new_size();

  // Cleanup will have freed any regions completely full of garbage.
  // Update the soft reference policy with the new heap occupancy.
  Universe::update_heap_info_at_gc();

  // We reclaimed old regions so we should calculate the sizes to make
  // sure we update the old gen/space data.
  _g1h->g1mm()->update_sizes();
}

void G1ConcurrentMark::cleanup() {
  assert_at_safepoint_on_vm_thread();

  // If a full collection has happened, we shouldn't do this.
  if (has_aborted()) {
    return;
  }

  TraceConcMemoryManagerStats tms(TraceConcMemoryManagerStats::Cleanup, _g1h->gc_cause());

  G1Policy* g1p = _g1h->g1_policy();
  g1p->record_concurrent_mark_cleanup_start();

  double start = os::elapsedTime();

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");

  {
    GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
    G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
    _g1h->heap_region_iterate(&cl);
  }

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
    _g1h->heap_region_iterate(&cl);
  }

  verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
// ...

