src/share/vm/gc/g1/concurrentMarkThread.cpp
  75   CMCleanUp(G1ConcurrentMark* cm) :
  76     _cm(cm) {}
  77 
  78   void do_void() {
  79     _cm->cleanup();
  80   }
  81 };
  82 
  83 // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
  84 void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
  85   if (g1_policy->adaptive_young_list_length()) {
  86     double now = os::elapsedTime();
  87     double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
  88                                   : g1_policy->predict_cleanup_time_ms();
  89     G1MMUTracker* mmu_tracker = g1_policy->mmu_tracker();
  90     jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
  91     os::sleep(this, sleep_time_ms, false);
  92   }
  93 }
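
The delay above is how the marking thread keeps remark and cleanup pauses inside the MMU (minimum mutator utilization) goal: the policy predicts the pause duration, and the MMU tracker answers how long to sleep so the pause still fits the utilization window. Below is a crude, self-contained sketch of that budget computation; the names (mmu_target, time_slice_ms, recent_pause_ms) are hypothetical and none of G1MMUTracker's real sliding-window bookkeeping is modeled.

#include <algorithm>
#include <vector>

// Hedged sketch, not HotSpot code: how long must a pause of pause_ms
// wait so total pause time within one time slice stays inside budget?
double when_ms(double pause_ms,
               double time_slice_ms,   // MMU window length
               double mmu_target,      // e.g. 0.9 = 90% mutator time
               const std::vector<double>& recent_pause_ms) {
  double spent = 0.0;
  for (double p : recent_pause_ms) {
    spent += p;                        // pause time already in the window
  }
  // The window tolerates at most (1 - mmu_target) of its length in pauses.
  double budget = (1.0 - mmu_target) * time_slice_ms;
  // Crude assumption: waiting t ms lets t ms of old pause time slide out.
  return std::max(0.0, (spent + pause_ms) - budget);
}
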
  94 
  95 class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
  96   G1ConcurrentMark* _cm;
  97 
  98  public:
  99   G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
 100      GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)>(title),
 101      _cm(cm) {
 102     _cm->register_concurrent_phase_start(title);
 103   }
 104 
 105   ~G1ConcPhaseTimer() {
 106     _cm->register_concurrent_phase_end();
 107   }
 108 };
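
G1ConcPhaseTimer is a scope guard: the constructor logs the phase start and registers it with the mark object, and the destructor registers the end, so every exit path of the scope is accounted for. The same shape in a standalone, hedged sketch (PhaseTimer and the printf calls are illustrative stand-ins, not the HotSpot API):

#include <cstdio>

// Illustrative stand-in for the RAII phase-timer pattern above.
struct PhaseTimer {
  const char* _title;
  explicit PhaseTimer(const char* title) : _title(title) {
    std::printf("phase start: %s\n", _title);  // ~ register_concurrent_phase_start()
  }
  ~PhaseTimer() {
    std::printf("phase end:   %s\n", _title);  // ~ register_concurrent_phase_end()
  }
};

void some_phase() {
  PhaseTimer t("Concurrent Example Phase");
  // ... phase work; the destructor runs on every exit path.
}
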
 109 
 110 void ConcurrentMarkThread::run_service() {
 111   _vtime_start = os::elapsedVTime();
 112 
 113   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 114   G1CollectorPolicy* g1_policy = g1h->g1_policy();
 115 
 116   while (!should_terminate()) {
 117     // wait until started is set.
 118     sleepBeforeNextCycle();
 119     if (should_terminate()) {
 120       _cm->root_regions()->cancel_scan();
 121       break;
 122     }
 123 
 124     assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
 125 
 126     GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
 127     {
 128       ResourceMark rm;
 129       HandleMark   hm;
 130       double cycle_start = os::elapsedVTime();
 131 
 132       {
 133         G1ConcPhaseTimer t(_cm, "Concurrent Clear Claimed Marks");
 134         ClassLoaderDataGraph::clear_claimed_marks();
 135       }
 136 
 137       // We have to ensure that we finish scanning the root regions
 138       // before the next GC takes place. To ensure this, we have to
 139       // make sure that we do not join the STS until the root regions
 140       // have been scanned. If we did, it's possible that a
 141       // subsequent GC could block us from joining the STS and proceed
 142       // without the root regions having been scanned, which would be
 143       // a correctness issue.
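
The invariant in this comment is purely an ordering one: once a thread has joined the suspendible thread set (STS), a safepoint may suspend it at any time and rely on the root regions being fully scanned. A hedged sketch of the join/leave pattern, with a hypothetical SuspendibleSet standing in for HotSpot's SuspendibleThreadSet:

#include <cstdio>

// Hypothetical stand-in for HotSpot's SuspendibleThreadSet.
struct SuspendibleSet {
  static void join()  { /* would block while a safepoint is running */ }
  static void leave() { /* lets a pending safepoint proceed */ }
};

// RAII joiner, same shape as SuspendibleThreadSetJoiner below.
struct Joiner {
  Joiner()  { SuspendibleSet::join();  }
  ~Joiner() { SuspendibleSet::leave(); }
};

void concurrent_mark_step() {
  // Root-region scanning must already be complete here; once we join,
  // a GC pause may suspend us at any time and assume that invariant.
  Joiner j;
  std::puts("marking while suspendible");
}
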
 144 
 145       {
 146         G1ConcPhaseTimer t(_cm, "Concurrent Scan Root Regions");


 195         VMThread::execute(&op);
 196       } else {
 197         // We don't want to update the marking status if a GC pause
 198         // is already underway.
 199         SuspendibleThreadSetJoiner sts_join;
 200         g1h->collector_state()->set_mark_in_progress(false);
 201       }
 202 
 203         // Check if cleanup set the free_regions_coming flag. If it
 204         // didn't, we can just skip the next step.
 205       if (g1h->free_regions_coming()) {
 206         // The following will finish freeing up any regions that we
 207         // found to be empty during cleanup. We'll do this part
 208         // without joining the suspendible set. If an evacuation pause
 209         // takes place, then we would carry on freeing regions in
 210         // case they are needed by the pause. If a Full GC takes
 211         // place, it would wait for us to process the regions
 212         // reclaimed by cleanup.
 213 
 214         G1ConcPhaseTimer t(_cm, "Concurrent Complete Cleanup");

 215         // Now do the concurrent cleanup operation.
 216         _cm->complete_cleanup();
 217 
 218         // Notify anyone who's waiting that there are no more free
 219         // regions coming. We have to do this before we join the STS
 220         // (in fact, we should not attempt to join the STS in the
 221         // interval between finishing the cleanup pause and clearing
 222         // the free_regions_coming flag) otherwise we might deadlock:
 223         // a GC worker could be blocked waiting for the notification
 224         // whereas this thread will be blocked waiting for the pause
 225         // to finish while trying to join the STS, which is conditional
 226         // on the GC workers finishing.
 227         g1h->reset_free_regions_coming();
 228       }
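
The rule encoded here (clear free_regions_coming and notify waiters strictly before any attempt to join the STS) breaks a three-way cycle: a worker waits for the notification, this thread waits to join the STS, and the STS join waits for the workers. A minimal standalone sketch of that flag-plus-notify handshake using std::condition_variable; all names are hypothetical analogues, not the HotSpot monitor API:

#include <condition_variable>
#include <mutex>

std::mutex lock;
std::condition_variable cv;
bool free_regions_coming = true;

// Analogue of reset_free_regions_coming(): clear the flag and wake
// all waiters *before* this thread could block on joining the STS.
void reset_free_regions_coming() {
  {
    std::lock_guard<std::mutex> g(lock);
    free_regions_coming = false;
  }
  cv.notify_all();
}

// Analogue of a GC worker waiting for cleanup's freed regions.
void wait_while_free_regions_coming() {
  std::unique_lock<std::mutex> g(lock);
  cv.wait(g, [] { return !free_regions_coming; });
}
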
 229       guarantee(cm()->cleanup_list_is_empty(),
 230                 "at this point there should be no regions on the cleanup list");
 231 
 232       // There is a tricky race between recording that the concurrent
 233       // cleanup has completed and a potential Full GC starting around
 234       // the same time. We want to make sure that the Full GC calls

