
src/share/vm/gc/g1/concurrentMarkThread.cpp

rev 11747 : [mq]: per.hotspot.patch


 158 
 159       int iter = 0;
 160       do {
 161         iter++;
 162         if (!cm()->has_aborted()) {
 163           G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots");
 164           _cm->mark_from_roots();
 165         }
 166 
 167         double mark_end_time = os::elapsedVTime();
 168         jlong mark_end = os::elapsed_counter();
 169         _vtime_mark_accum += (mark_end_time - cycle_start);
 170         if (!cm()->has_aborted()) {
 171           delay_to_keep_mmu(g1_policy, true /* remark */);
 172           log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
 173                                 TimeHelper::counter_to_seconds(mark_start),
 174                                 TimeHelper::counter_to_seconds(mark_end),
 175                                 TimeHelper::counter_to_millis(mark_end - mark_start));
 176 
 177           CMCheckpointRootsFinalClosure final_cl(_cm);
 178           VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
 179           VMThread::execute(&op);
 180         }
 181         if (cm()->restart_for_overflow()) {
 182           log_debug(gc, marking)("Restarting Concurrent Marking because of Mark Stack Overflow in Remark (Iteration #%d).", iter);
 183           log_info(gc, marking)("Concurrent Mark Restart due to overflow");
 184         }
 185       } while (cm()->restart_for_overflow());
 186 
 187       if (!cm()->has_aborted()) {
 188         G1ConcPhaseTimer t(_cm, "Concurrent Create Live Data");
 189         cm()->create_live_data();
 190       }
 191 
 192       double end_time = os::elapsedVTime();
 193       // Update the total virtual time before doing this, since it will try
 194       // to measure it to get the vtime for this marking.  We purposely
 195       // neglect the presumably-short "completeCleanup" phase here.
 196       _vtime_accum = (end_time - _vtime_start);
 197 
 198       if (!cm()->has_aborted()) {
 199         delay_to_keep_mmu(g1_policy, false /* cleanup */);
 200 
 201         CMCleanUp cl_cl(_cm);
 202         VM_CGC_Operation op(&cl_cl, "Pause Cleanup", false /* needs_pll */);
 203         VMThread::execute(&op);
 204       } else {
 205         // We don't want to update the marking status if a GC pause
 206         // is already underway.
 207         SuspendibleThreadSetJoiner sts_join;
 208         g1h->collector_state()->set_mark_in_progress(false);
 209       }
 210 
 211       // Check if cleanup set the free_regions_coming flag. If it
 212       // hasn't, we can just skip the next step.
 213       if (g1h->free_regions_coming()) {
 214         // The following will finish freeing up any regions that we
 215         // found to be empty during cleanup. We'll do this part
 216         // without joining the suspendible set. If an evacuation pause
 217         // takes place, then we would carry on freeing regions in
 218         // case they are needed by the pause. If a Full GC takes
 219         // place, it would wait for us to process the regions
 220         // reclaimed by cleanup.
 221 
 222         G1ConcPhaseTimer t(_cm, "Concurrent Complete Cleanup");




 158 
 159       int iter = 0;
 160       do {
 161         iter++;
 162         if (!cm()->has_aborted()) {
 163           G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots");
 164           _cm->mark_from_roots();
 165         }
 166 
 167         double mark_end_time = os::elapsedVTime();
 168         jlong mark_end = os::elapsed_counter();
 169         _vtime_mark_accum += (mark_end_time - cycle_start);
 170         if (!cm()->has_aborted()) {
 171           delay_to_keep_mmu(g1_policy, true /* remark */);
 172           log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
 173                                 TimeHelper::counter_to_seconds(mark_start),
 174                                 TimeHelper::counter_to_seconds(mark_end),
 175                                 TimeHelper::counter_to_millis(mark_end - mark_start));
 176 
 177           CMCheckpointRootsFinalClosure final_cl(_cm);
 178           VM_CGC_Operation op(&final_cl, "Pause Remark");
 179           VMThread::execute(&op);
 180         }
 181         if (cm()->restart_for_overflow()) {
 182           log_debug(gc, marking)("Restarting Concurrent Marking because of Mark Stack Overflow in Remark (Iteration #%d).", iter);
 183           log_info(gc, marking)("Concurrent Mark Restart due to overflow");
 184         }
 185       } while (cm()->restart_for_overflow());
 186 
 187       if (!cm()->has_aborted()) {
 188         G1ConcPhaseTimer t(_cm, "Concurrent Create Live Data");
 189         cm()->create_live_data();
 190       }
 191 
 192       double end_time = os::elapsedVTime();
 193       // Update the total virtual time before doing this, since it will try
 194       // to measure it to get the vtime for this marking.  We purposely
 195       // neglect the presumably-short "completeCleanup" phase here.
 196       _vtime_accum = (end_time - _vtime_start);
 197 
 198       if (!cm()->has_aborted()) {
 199         delay_to_keep_mmu(g1_policy, false /* cleanup */);
 200 
 201         CMCleanUp cl_cl(_cm);
 202         VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
 203         VMThread::execute(&op);
 204       } else {
 205         // We don't want to update the marking status if a GC pause
 206         // is already underway.
 207         SuspendibleThreadSetJoiner sts_join;
 208         g1h->collector_state()->set_mark_in_progress(false);
 209       }
 210 
 211       // Check if cleanup set the free_regions_coming flag. If it
 212       // hasn't, we can just skip the next step.
 213       if (g1h->free_regions_coming()) {
 214         // The following will finish freeing up any regions that we
 215         // found to be empty during cleanup. We'll do this part
 216         // without joining the suspendible set. If an evacuation pause
 217         // takes place, then we would carry on freeing regions in
 218         // case they are needed by the pause. If a Full GC takes
 219         // place, it would wait for us to process the regions
 220         // reclaimed by cleanup.
 221 
 222         G1ConcPhaseTimer t(_cm, "Concurrent Complete Cleanup");
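
The change shown above drops the trailing needs_pll argument from the VM_CGC_Operation constructor calls for the "Pause Remark" and "Pause Cleanup" operations. Below is a minimal standalone sketch of the pattern involved, a closure handed to a VM operation that is then executed on behalf of the concurrent mark thread. The Sketch* class names and the inline execute() helper are illustrative stand-ins chosen for this sketch, not HotSpot's actual API.

    // Standalone sketch (not HotSpot code) of the closure-wrapped-in-a-VM-operation
    // pattern used above. SketchClosure, SketchVMOperation and execute() are
    // stand-ins for VoidClosure, VM_CGC_Operation and VMThread::execute().
    #include <cstdio>

    // Stand-in for a void closure: a unit of work handed to a VM operation.
    class SketchClosure {
    public:
      virtual ~SketchClosure() {}
      virtual void do_void() = 0;
    };

    // Stand-in for VM_CGC_Operation. The old code also passed a needs_pll flag;
    // the new code drops that argument, mirrored here by a two-argument constructor.
    class SketchVMOperation {
      SketchClosure* _cl;
      const char*    _name;
    public:
      SketchVMOperation(SketchClosure* cl, const char* name)
        : _cl(cl), _name(name) {}
      void doit() {
        printf("pause: %s\n", _name);
        _cl->do_void();
      }
    };

    // Stand-in for VMThread::execute(); the real VM thread runs doit() at a
    // safepoint, while this sketch simply calls it inline.
    static void execute(SketchVMOperation* op) { op->doit(); }

    // Example closure, analogous in shape to CMCheckpointRootsFinalClosure.
    class SketchRemarkClosure : public SketchClosure {
    public:
      virtual void do_void() { printf("  remark work runs here\n"); }
    };

    int main() {
      SketchRemarkClosure final_cl;
      SketchVMOperation op(&final_cl, "Pause Remark");  // no needs_pll, as in the new code
      execute(&op);
      return 0;
    }

In the sketch the operation just runs its closure inline; in HotSpot, VMThread::execute() hands the operation to the VM thread, which performs it at a safepoint before the concurrent mark thread continues.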

