
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

rev 53923 : [mq]: 8219747-remove-g1-prefix

The excerpts below appear twice: first as they read before this change, then again as they read after it, with the g1_ prefixes dropped from the heap accessor calls (g1_policy() becomes policy(), g1_rem_set() becomes rem_set()).


 996   verifier->check_bitmaps(caller);
 997 }
 998 
 999 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
1000   G1CollectedHeap* _g1h;
1001   G1ConcurrentMark* _cm;
1002   HeapRegionClaimer _hrclaimer;
1003   uint volatile _total_selected_for_rebuild;
1004 
1005   G1PrintRegionLivenessInfoClosure _cl;
1006 
1007   class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1008     G1CollectedHeap* _g1h;
1009     G1ConcurrentMark* _cm;
1010 
1011     G1PrintRegionLivenessInfoClosure* _cl;
1012 
1013     uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1014 
1015     void update_remset_before_rebuild(HeapRegion* hr) {
1016       G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
1017 
1018       bool selected_for_rebuild;
1019       if (hr->is_humongous()) {
1020         bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1021         selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1022       } else {
1023         size_t const live_bytes = _cm->liveness(hr->hrm_index());
1024         selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1025       }
1026       if (selected_for_rebuild) {
1027         _num_regions_selected_for_rebuild++;
1028       }
1029       _cm->update_top_at_rebuild_start(hr);
1030     }
1031 
1032     // Distribute the given number of marked words across the regions of the
1033     // humongous object starting at hr, and note the end of marking for them.
1034     void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1035       uint const region_idx = hr->hrm_index();
1036       size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
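The hunk breaks off just as distribute_marked_bytes gets going. As a rough, standalone illustration of the scheme the comment describes (credit each spanned region with at most one region's worth of marked words), here is a sketch; kGrainWords, the flat vector bookkeeping, and all other names in it are invented for the example and are not the HotSpot implementation.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

// Invented stand-ins for HeapRegion::GrainWords and per-region accounting.
static const size_t kGrainWords = 512 * 1024;          // words per region (assumed)
static std::vector<size_t> g_marked_words_per_region;  // indexed by region number

// Credit each region spanned by a humongous object with at most one
// region's worth of marked words, mirroring the "distribute" idea above.
void distribute_marked_words(size_t start_region, size_t obj_size_in_words) {
  size_t remaining = obj_size_in_words;
  for (size_t i = start_region; remaining > 0; i++) {
    size_t chunk = std::min(kGrainWords, remaining);
    g_marked_words_per_region[i] += chunk;  // also where end of marking would be noted
    remaining -= chunk;
  }
}

int main() {
  g_marked_words_per_region.assign(8, 0);
  distribute_marked_words(2, kGrainWords * 2 + 100);  // object spans three regions
  for (size_t i = 0; i < g_marked_words_per_region.size(); i++) {
    std::printf("region %zu: %zu words\n", i, g_marked_words_per_region[i]);
  }
  return 0;
}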


1101     _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1102 
1103   virtual void work(uint worker_id) {
1104     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1105     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1106     Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
1107   }
1108 
1109   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1110 
1111   // Aim to spawn roughly one worker thread per this many regions for this work.
1112   static const uint RegionsPerThread = 384;
1113 };
1114 
1115 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1116   G1CollectedHeap* _g1h;
1117 public:
1118   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1119 
1120   virtual bool do_heap_region(HeapRegion* r) {
1121     _g1h->g1_policy()->remset_tracker()->update_after_rebuild(r);
1122     return false;
1123   }
1124 };
1125 
1126 void G1ConcurrentMark::remark() {
1127   assert_at_safepoint_on_vm_thread();
1128 
1129   // If a full collection has happened, we should not continue. However, we might
1130   // have ended up here because the Remark VM operation had already been scheduled.
1131   if (has_aborted()) {
1132     return;
1133   }
1134 
1135   G1Policy* g1p = _g1h->g1_policy();
1136   g1p->record_concurrent_mark_remark_start();
1137 
1138   double start = os::elapsedTime();
1139 
1140   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1141 
1142   {
1143     GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1144     finalize_marking();
1145   }
1146 
1147   double mark_work_end = os::elapsedTime();
1148 
1149   bool const mark_finished = !has_overflown();
1150   if (mark_finished) {
1151     weak_refs_work(false /* clear_all_soft_refs */);
1152 
1153     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1154     // We're done with marking.
1155     // This is the end of the marking cycle; we expect all
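The work() method in the rebuild task above follows a common reduction pattern: each worker counts privately while claiming regions, then publishes its count once with Atomic::add. A minimal standalone analogue, using std::atomic and std::thread in place of HotSpot's Atomic and work gang (all names invented):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Shared total, published to once per worker to avoid contention while
// the regions themselves are being processed.
std::atomic<unsigned> g_total_selected{0};

void worker(unsigned regions_claimed) {
  unsigned local_selected = 0;
  for (unsigned i = 0; i < regions_claimed; i++) {
    if (i % 3 == 0) {        // stand-in for "selected for rebuild"
      local_selected++;
    }
  }
  g_total_selected.fetch_add(local_selected);  // the Atomic::add step
}

int main() {
  std::vector<std::thread> gang;
  for (int w = 0; w < 4; w++) gang.emplace_back(worker, 90u);
  for (auto& t : gang) t.join();
  std::printf("total selected: %u\n", g_total_selected.load());
  return 0;
}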


1321 void G1ConcurrentMark::compute_new_sizes() {
1322   MetaspaceGC::compute_new_size();
1323 
1324   // Cleanup will have freed any regions completely full of garbage.
1325   // Update the soft reference policy with the new heap occupancy.
1326   Universe::update_heap_info_at_gc();
1327 
1328   // We reclaimed old regions, so we should recalculate the sizes to make
1329   // sure the old gen/space data is updated.
1330   _g1h->g1mm()->update_sizes();
1331 }
1332 
1333 void G1ConcurrentMark::cleanup() {
1334   assert_at_safepoint_on_vm_thread();
1335 
1336   // If a full collection has happened, we shouldn't do this.
1337   if (has_aborted()) {
1338     return;
1339   }
1340 
1341   G1Policy* g1p = _g1h->g1_policy();
1342   g1p->record_concurrent_mark_cleanup_start();
1343 
1344   double start = os::elapsedTime();
1345 
1346   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1347 
1348   {
1349     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1350     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1351     _g1h->heap_region_iterate(&cl);
1352   }
1353 
1354   if (log_is_enabled(Trace, gc, liveness)) {
1355     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1356     _g1h->heap_region_iterate(&cl);
1357   }
1358 
1359   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1360 
1361   // We need to make this count as a "collection" so that any collection pause
1362   // that races with it goes around and waits for Cleanup to finish.
1363   _g1h->increment_total_collections();
1364 
1365   // Local statistics
1366   double recent_cleanup_time = (os::elapsedTime() - start);
1367   _total_cleanup_time += recent_cleanup_time;
1368   _cleanup_times.add(recent_cleanup_time);
1369 
1370   {
1371     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1372     _g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1373   }
1374 }
1375 
1376 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1377 // Uses the G1CMTask associated with a worker thread (for serial reference
1378 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1379 // trace referent objects.
1380 //
1381 // Using the G1CMTask and embedded local queues avoids having the worker
1382 // threads operating on the global mark stack. This reduces the risk
1383 // of overflowing the stack, which we would rather avoid at this late
1384 // stage. Using the tasks' local queues also removes the potential
1385 // for the workers to interfere with each other, as they could when
1386 // operating on the global stack.
1387 
1388 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1389   G1ConcurrentMark* _cm;
1390   G1CMTask*         _task;
1391   uint              _ref_counter_limit;
1392   uint              _ref_counter;
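The _ref_counter/_ref_counter_limit fields above set up counter-limited draining: after every limit references the closure stops to drain its local queue, keeping it bounded so entries need not spill to the global mark stack. A hedged standalone analogue of that pattern, with invented types (the real closure drains by calling do_marking_step, which this sketch does not model):

#include <cstdio>
#include <deque>

class KeepAliveAndDrain {
  std::deque<int> _local_queue;
  unsigned _ref_counter_limit;
  unsigned _ref_counter;

  void drain() {
    while (!_local_queue.empty()) {
      _local_queue.pop_front();    // stand-in for tracing one object
    }
  }

public:
  KeepAliveAndDrain(unsigned limit) : _ref_counter_limit(limit), _ref_counter(limit) {}

  void do_oop(int referent) {
    _local_queue.push_back(referent);  // "mark" and remember the referent
    if (--_ref_counter == 0) {
      drain();                         // bound the local queue's growth
      _ref_counter = _ref_counter_limit;
    }
  }
};

int main() {
  KeepAliveAndDrain cl(16);
  for (int i = 0; i < 100; i++) cl.do_oop(i);  // drains every 16th reference
  std::printf("done\n");
  return 0;
}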


1976   }
1977 
1978   // Verify the task fingers
1979   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1980   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1981     G1CMTask* task = _tasks[i];
1982     HeapWord* task_finger = task->finger();
1983     if (task_finger != NULL && task_finger < _heap.end()) {
1984       // See above note on the global finger verification.
1985       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
1986       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
1987                 !task_hr->in_collection_set(),
1988                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1989                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
1990     }
1991   }
1992 }
1993 #endif // PRODUCT
1994 
1995 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1996   _g1h->g1_rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
1997 }
1998 
1999 void G1ConcurrentMark::print_stats() {
2000   if (!log_is_enabled(Debug, gc, stats)) {
2001     return;
2002   }
2003   log_debug(gc, stats)("---------------------------------------------------------------------");
2004   for (size_t i = 0; i < _num_active_tasks; ++i) {
2005     _tasks[i]->print_stats();
2006     log_debug(gc, stats)("---------------------------------------------------------------------");
2007   }
2008 }
2009 
2010 void G1ConcurrentMark::concurrent_cycle_abort() {
2011   if (!cm_thread()->during_cycle() || _has_aborted) {
2012     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2013     return;
2014   }
2015 
2016   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
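print_stats() above, like the Trace-level liveness printing in cleanup(), checks log_is_enabled() before doing any work, so the expensive part is skipped entirely when the output would be discarded. A minimal standalone rendering of that guard pattern (the log levels and helpers are invented for the sketch):

#include <cstdio>

enum Level { Info, Debug, Trace };
static Level log_level = Info;

static bool log_is_enabled(Level l) { return l <= log_level; }

static void heap_walk() { std::printf("walking all regions...\n"); }

void print_stats() {
  if (!log_is_enabled(Debug)) {
    return;                 // cheap early exit, mirroring the code above
  }
  heap_walk();              // only pay for the walk when it will be logged
}

int main() {
  print_stats();            // silent: Debug is not enabled
  log_level = Trace;
  print_stats();            // now the walk actually runs
  return 0;
}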


2555     The value of is_serial must be false when do_marking_step is
2556     being called by any of the worker threads in a work gang.
2557     Examples include the concurrent marking code (G1CMConcurrentMarkingTask),
2558     the MT remark code, and the MT reference processing closures.
2559 
2560  *****************************************************************************/
2561 
2562 void G1CMTask::do_marking_step(double time_target_ms,
2563                                bool do_termination,
2564                                bool is_serial) {
2565   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2566 
2567   _start_time_ms = os::elapsedVTime() * 1000.0;
2568 
2569   // If do_stealing is true then do_marking_step will attempt to
2570   // steal work from the other G1CMTasks. It only makes sense to
2571   // enable stealing when the termination protocol is enabled
2572   // and do_marking_step() is not being called serially.
2573   bool do_stealing = do_termination && !is_serial;
2574 
2575   double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2576   _time_target_ms = time_target_ms - diff_prediction_ms;
2577 
2578   // set up the variables that are used in the work-based scheme to
2579   // call the regular clock method
2580   _words_scanned = 0;
2581   _refs_reached  = 0;
2582   recalculate_limits();
2583 
2584   // clear all flags
2585   clear_has_aborted();
2586   _has_timed_out = false;
2587   _draining_satb_buffers = false;
2588 
2589   ++_calls;
2590 
2591   // Set up the bitmap and oop closures. Anything that uses them is
2592   // eventually called from this method, so it is OK to allocate them
2593   // here on the stack.
2594   G1CMBitMapClosure bitmap_closure(this, _cm);
2595   G1CMOopClosure cm_oop_closure(_g1h, this);
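Two details of the setup above are worth restating in isolation: the effective time budget is the requested target minus a prediction of past overshoot, and stealing is only sensible together with the termination protocol and outside serial mode. A small sketch under those assumptions, with a plain average standing in for the policy's predictor:

#include <cstdio>
#include <vector>

// Invented stand-in for the predictor over _marking_step_diffs_ms.
static double average(const std::vector<double>& xs) {
  double s = 0.0;
  for (double x : xs) s += x;
  return xs.empty() ? 0.0 : s / xs.size();
}

int main() {
  std::vector<double> past_diffs_ms = {0.4, 0.6, 0.5};  // observed overshoots
  double time_target_ms = 10.0;
  bool do_termination = true, is_serial = false;

  // Stealing requires termination and a non-serial caller, as above.
  bool do_stealing = do_termination && !is_serial;
  // Shrink the target by the predicted overshoot so the step lands on budget.
  double effective_target_ms = time_target_ms - average(past_diffs_ms);

  std::printf("stealing=%d target=%.2fms\n", do_stealing, effective_target_ms);
  return 0;
}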




The same excerpts, as they read after the change:

 996   verifier->check_bitmaps(caller);
 997 }
 998 
 999 class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
1000   G1CollectedHeap* _g1h;
1001   G1ConcurrentMark* _cm;
1002   HeapRegionClaimer _hrclaimer;
1003   uint volatile _total_selected_for_rebuild;
1004 
1005   G1PrintRegionLivenessInfoClosure _cl;
1006 
1007   class G1UpdateRemSetTrackingBeforeRebuild : public HeapRegionClosure {
1008     G1CollectedHeap* _g1h;
1009     G1ConcurrentMark* _cm;
1010 
1011     G1PrintRegionLivenessInfoClosure* _cl;
1012 
1013     uint _num_regions_selected_for_rebuild;  // The number of regions actually selected for rebuild.
1014 
1015     void update_remset_before_rebuild(HeapRegion* hr) {
1016       G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();
1017 
1018       bool selected_for_rebuild;
1019       if (hr->is_humongous()) {
1020         bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
1021         selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
1022       } else {
1023         size_t const live_bytes = _cm->liveness(hr->hrm_index());
1024         selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
1025       }
1026       if (selected_for_rebuild) {
1027         _num_regions_selected_for_rebuild++;
1028       }
1029       _cm->update_top_at_rebuild_start(hr);
1030     }
1031 
1032     // Distribute the given number of marked words across the regions of the
1033     // humongous object starting at hr, and note the end of marking for them.
1034     void distribute_marked_bytes(HeapRegion* hr, size_t marked_words) {
1035       uint const region_idx = hr->hrm_index();
1036       size_t const obj_size_in_words = (size_t)oop(hr->bottom())->size();
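In the humongous branch above, liveness is looked up via the start region's index, since marking attributes a humongous object's liveness to the region where it begins; continuation regions are live exactly when their start region is. A standalone sketch of that lookup, with flat arrays standing in for the marking data (all names invented):

#include <cstdio>
#include <vector>

static std::vector<size_t> g_live_words;    // per region, filled by marking
static std::vector<int>    g_start_region;  // humongous start index, or -1

// A humongous region is live iff its start region recorded any live words.
bool humongous_is_live(int region_idx) {
  int start = g_start_region[region_idx] < 0 ? region_idx : g_start_region[region_idx];
  return g_live_words[start] > 0;
}

int main() {
  g_live_words   = {0, 4096, 0, 0};  // only region 1 (a start region) has liveness
  g_start_region = {-1, -1, 1, 1};   // regions 2 and 3 continue the object at 1
  std::printf("region 3 live? %d\n", humongous_is_live(3));  // prints 1
  return 0;
}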


1101     _g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
1102 
1103   virtual void work(uint worker_id) {
1104     G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
1105     _g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
1106     Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
1107   }
1108 
1109   uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
1110 
1111   // Aim to spawn roughly one worker thread per this many regions for this work.
1112   static const uint RegionsPerThread = 384;
1113 };
1114 
1115 class G1UpdateRemSetTrackingAfterRebuild : public HeapRegionClosure {
1116   G1CollectedHeap* _g1h;
1117 public:
1118   G1UpdateRemSetTrackingAfterRebuild(G1CollectedHeap* g1h) : _g1h(g1h) { }
1119 
1120   virtual bool do_heap_region(HeapRegion* r) {
1121     _g1h->policy()->remset_tracker()->update_after_rebuild(r);
1122     return false;
1123   }
1124 };
1125 
1126 void G1ConcurrentMark::remark() {
1127   assert_at_safepoint_on_vm_thread();
1128 
1129   // If a full collection has happened, we should not continue. However, we might
1130   // have ended up here because the Remark VM operation had already been scheduled.
1131   if (has_aborted()) {
1132     return;
1133   }
1134 
1135   G1Policy* g1p = _g1h->policy();
1136   g1p->record_concurrent_mark_remark_start();
1137 
1138   double start = os::elapsedTime();
1139 
1140   verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "Remark before");
1141 
1142   {
1143     GCTraceTime(Debug, gc, phases) debug("Finalize Marking", _gc_timer_cm);
1144     finalize_marking();
1145   }
1146 
1147   double mark_work_end = os::elapsedTime();
1148 
1149   bool const mark_finished = !has_overflown();
1150   if (mark_finished) {
1151     weak_refs_work(false /* clear_all_soft_refs */);
1152 
1153     SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
1154     // We're done with marking.
1155     // This is the end of the marking cycle; we expect all
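RegionsPerThread suggests the obvious sizing rule: one worker per block of that many regions, capped by the available threads. The diff does not show the caller in remark(), so the following is only the plausible reading of the constant's comment, not a quote of the actual caller:

#include <algorithm>
#include <cstdio>

// Size the gang: ceil(num_regions / regions_per_thread), capped by the
// threads actually available, and never less than one.
unsigned workers_for(unsigned num_regions, unsigned regions_per_thread,
                     unsigned active_workers) {
  unsigned by_capacity = (num_regions + regions_per_thread - 1) / regions_per_thread;
  return std::max(1u, std::min(active_workers, by_capacity));
}

int main() {
  std::printf("%u\n", workers_for(2048, 384, 8));  // ceil(2048/384)=6, cap 8 -> 6
  std::printf("%u\n", workers_for(8192, 384, 8));  // ceil=22, cap 8 -> 8
  return 0;
}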


1321 void G1ConcurrentMark::compute_new_sizes() {
1322   MetaspaceGC::compute_new_size();
1323 
1324   // Cleanup will have freed any regions completely full of garbage.
1325   // Update the soft reference policy with the new heap occupancy.
1326   Universe::update_heap_info_at_gc();
1327 
1328   // We reclaimed old regions, so we should recalculate the sizes to make
1329   // sure the old gen/space data is updated.
1330   _g1h->g1mm()->update_sizes();
1331 }
1332 
1333 void G1ConcurrentMark::cleanup() {
1334   assert_at_safepoint_on_vm_thread();
1335 
1336   // If a full collection has happened, we shouldn't do this.
1337   if (has_aborted()) {
1338     return;
1339   }
1340 
1341   G1Policy* g1p = _g1h->policy();
1342   g1p->record_concurrent_mark_cleanup_start();
1343 
1344   double start = os::elapsedTime();
1345 
1346   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup before");
1347 
1348   {
1349     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking After Rebuild", _gc_timer_cm);
1350     G1UpdateRemSetTrackingAfterRebuild cl(_g1h);
1351     _g1h->heap_region_iterate(&cl);
1352   }
1353 
1354   if (log_is_enabled(Trace, gc, liveness)) {
1355     G1PrintRegionLivenessInfoClosure cl("Post-Cleanup");
1356     _g1h->heap_region_iterate(&cl);
1357   }
1358 
1359   verify_during_pause(G1HeapVerifier::G1VerifyCleanup, VerifyOption_G1UsePrevMarking, "Cleanup after");
1360 
1361   // We need to make this count as a "collection" so that any collection pause
1362   // that races with it goes around and waits for Cleanup to finish.
1363   _g1h->increment_total_collections();
1364 
1365   // Local statistics
1366   double recent_cleanup_time = (os::elapsedTime() - start);
1367   _total_cleanup_time += recent_cleanup_time;
1368   _cleanup_times.add(recent_cleanup_time);
1369 
1370   {
1371     GCTraceTime(Debug, gc, phases) debug("Finalize Concurrent Mark Cleanup", _gc_timer_cm);
1372     _g1h->policy()->record_concurrent_mark_cleanup_end();
1373   }
1374 }
1375 
1376 // 'Keep Alive' oop closure used by both serial and parallel reference processing.
1377 // Uses the G1CMTask associated with a worker thread (for serial reference
1378 // processing the G1CMTask for worker 0 is used) to preserve (mark) and
1379 // trace referent objects.
1380 //
1381 // Using the G1CMTask and embedded local queues avoids having the worker
1382 // threads operating on the global mark stack. This reduces the risk
1383 // of overflowing the stack, which we would rather avoid at this late
1384 // stage. Using the tasks' local queues also removes the potential
1385 // for the workers to interfere with each other, as they could when
1386 // operating on the global stack.
1387 
1388 class G1CMKeepAliveAndDrainClosure : public OopClosure {
1389   G1ConcurrentMark* _cm;
1390   G1CMTask*         _task;
1391   uint              _ref_counter_limit;
1392   uint              _ref_counter;
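Note that G1UpdateRemSetTrackingAfterRebuild::do_heap_region() above returns false, which in the HeapRegionClosure contract means "continue iterating"; returning true stops the walk early. A minimal iterator honoring that contract, with invented Region and closure types:

#include <cstdio>
#include <vector>

struct Region { int idx; };

struct RegionClosure {
  virtual bool do_heap_region(Region* r) = 0;  // true = stop iteration
  virtual ~RegionClosure() {}
};

void region_iterate(std::vector<Region>& heap, RegionClosure* cl) {
  for (Region& r : heap) {
    if (cl->do_heap_region(&r)) {
      return;                                  // closure asked to stop early
    }
  }
}

struct PrintUpTo : RegionClosure {
  int limit;
  PrintUpTo(int l) : limit(l) {}
  bool do_heap_region(Region* r) override {
    std::printf("region %d\n", r->idx);
    return r->idx >= limit;                    // stop once the limit is hit
  }
};

int main() {
  std::vector<Region> heap = {{0}, {1}, {2}, {3}, {4}};
  PrintUpTo cl(2);
  region_iterate(heap, &cl);                   // prints regions 0, 1, 2
  return 0;
}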


1976   }
1977 
1978   // Verify the task fingers
1979   assert(_num_concurrent_workers <= _max_num_tasks, "sanity");
1980   for (uint i = 0; i < _num_concurrent_workers; ++i) {
1981     G1CMTask* task = _tasks[i];
1982     HeapWord* task_finger = task->finger();
1983     if (task_finger != NULL && task_finger < _heap.end()) {
1984       // See above note on the global finger verification.
1985       HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
1986       guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
1987                 !task_hr->in_collection_set(),
1988                 "task finger: " PTR_FORMAT " region: " HR_FORMAT,
1989                 p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
1990     }
1991   }
1992 }
1993 #endif // PRODUCT
1994 
1995 void G1ConcurrentMark::rebuild_rem_set_concurrently() {
1996   _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
1997 }
1998 
1999 void G1ConcurrentMark::print_stats() {
2000   if (!log_is_enabled(Debug, gc, stats)) {
2001     return;
2002   }
2003   log_debug(gc, stats)("---------------------------------------------------------------------");
2004   for (size_t i = 0; i < _num_active_tasks; ++i) {
2005     _tasks[i]->print_stats();
2006     log_debug(gc, stats)("---------------------------------------------------------------------");
2007   }
2008 }
2009 
2010 void G1ConcurrentMark::concurrent_cycle_abort() {
2011   if (!cm_thread()->during_cycle() || _has_aborted) {
2012     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2013     return;
2014   }
2015 
2016   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
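The guarantee in the verification hunk accepts a task finger when the containing region is unknown, when the finger sits exactly at the region's bottom, or when the region is outside the collection set. The same condition restated as a plain predicate, with a toy region type standing in for HeapRegion:

#include <cstdio>

struct HeapRegionInfo {
  const char* bottom;
  bool in_collection_set;
};

// Mirrors the guarantee: unknown region, finger at bottom, or not in cset.
bool task_finger_ok(const HeapRegionInfo* hr, const char* finger) {
  return hr == nullptr || finger == hr->bottom || !hr->in_collection_set;
}

int main() {
  char region_mem[16];
  HeapRegionInfo hr = { region_mem, true };
  std::printf("%d\n", task_finger_ok(&hr, region_mem));      // at bottom: ok
  std::printf("%d\n", task_finger_ok(&hr, region_mem + 8));  // inside a cset region: bad
  return 0;
}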


2555     The value of is_serial must be false when do_marking_step is
2556     being called by any of the worker threads in a work gang.
2557     Examples include the concurrent marking code (G1CMConcurrentMarkingTask),
2558     the MT remark code, and the MT reference processing closures.
2559 
2560  *****************************************************************************/
2561 
2562 void G1CMTask::do_marking_step(double time_target_ms,
2563                                bool do_termination,
2564                                bool is_serial) {
2565   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
2566 
2567   _start_time_ms = os::elapsedVTime() * 1000.0;
2568 
2569   // If do_stealing is true then do_marking_step will attempt to
2570   // steal work from the other G1CMTasks. It only makes sense to
2571   // enable stealing when the termination protocol is enabled
2572   // and do_marking_step() is not being called serially.
2573   bool do_stealing = do_termination && !is_serial;
2574 
2575   double diff_prediction_ms = _g1h->policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
2576   _time_target_ms = time_target_ms - diff_prediction_ms;
2577 
2578   // set up the variables that are used in the work-based scheme to
2579   // call the regular clock method
2580   _words_scanned = 0;
2581   _refs_reached  = 0;
2582   recalculate_limits();
2583 
2584   // clear all flags
2585   clear_has_aborted();
2586   _has_timed_out = false;
2587   _draining_satb_buffers = false;
2588 
2589   ++_calls;
2590 
2591   // Set up the bitmap and oop closures. Anything that uses them is
2592   // eventually called from this method, so it is OK to allocate them
2593   // here on the stack.
2594   G1CMBitMapClosure bitmap_closure(this, _cm);
2595   G1CMOopClosure cm_oop_closure(_g1h, this);
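The diff_prediction_ms fed into the time target (now obtained via policy() rather than g1_policy()) comes from the policy's predictor over past step overshoots. As a loose standalone analogue only, here is a windowed predictor with an invented padding factor and window size; G1's actual predictor differs:

#include <cstdio>
#include <vector>

class DiffPredictor {
  std::vector<double> _samples;
  size_t _window;
public:
  DiffPredictor(size_t window) : _window(window) {}
  void add(double ms) {
    _samples.push_back(ms);
    if (_samples.size() > _window) _samples.erase(_samples.begin());
  }
  double predict() const {
    if (_samples.empty()) return 0.0;
    double s = 0.0;
    for (double x : _samples) s += x;
    return 1.2 * (s / _samples.size());  // mean plus some safety padding (assumed)
  }
};

int main() {
  DiffPredictor p(5);
  p.add(0.5); p.add(0.7); p.add(0.6);
  double time_target_ms = 10.0;
  std::printf("target %.2fms\n", time_target_ms - p.predict());
  return 0;
}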

