
src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp

rev 9705 : [backport] Implement early update references phase.
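
This revision adds an update-references phase to the normal concurrent cycle: after concurrent evacuation, the control thread may execute a VM_ShenandoahInitUpdateRefs pause, update heap references concurrently, and finish with a VM_ShenandoahFinalUpdateRefs pause, reusing the cancellation and degeneration handling already in place for concurrent marking; run_service() also clears the update-refs-in-progress flag after a cycle. Below is a minimal, standalone sketch of the resulting phase ordering; everything in it (FakeHeap, the printed phase names) is an illustrative stand-in, not the HotSpot API.

// Standalone sketch (not HotSpot code): models the phase ordering of the
// patched service_normal_cycle(). All names here are illustrative stand-ins.
#include <cstdio>

struct FakeHeap {
  bool cancelled = false;          // stands in for cancelled_concgc()
  bool should_update_refs = true;  // stands in for should_start_update_refs()
};

static bool check_cancellation(const FakeHeap& heap) {
  return heap.cancelled;
}

static void normal_cycle(FakeHeap& heap) {
  std::puts("pause:      init mark");               // VM_ShenandoahInitMark
  std::puts("concurrent: marking");                 // mark_from_roots()
  if (check_cancellation(heap)) return;
  std::puts("pause:      final mark, start evac");  // VM_ShenandoahFinalMarkStartEvac
  std::puts("concurrent: evacuation");              // do_evacuation()
  if (check_cancellation(heap)) return;

  // New in this revision: an optional update-references phase after evacuation,
  // bracketed by its own init/final pauses.
  if (heap.should_update_refs) {
    std::puts("pause:      init update refs");      // VM_ShenandoahInitUpdateRefs
    std::puts("concurrent: update heap references");
    if (check_cancellation(heap)) return;
    std::puts("pause:      final update refs");     // VM_ShenandoahFinalUpdateRefs
  }

  std::puts("concurrent: reset next mark bitmap");  // prepare for the next cycle
}

int main() {
  FakeHeap heap;
  normal_cycle(heap);
  return 0;
}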


  57   // Wait until we have the surrogate locker thread in place.
  58   {
  59     MutexLockerEx x(CGC_lock, true);
  60     while(_slt == NULL && !_should_terminate) {
  61       CGC_lock->wait(true, 200);
  62     }
  63   }
  64 
  65   ShenandoahHeap* heap = ShenandoahHeap::heap();
  66 
  67   while (! _should_terminate) {
  68     if (in_graceful_shutdown()) {
  69       break;
  70     } else if (is_full_gc()) {
  71       service_fullgc_cycle();
  72     } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity())) {
  73       service_normal_cycle();
  74       if (heap->is_evacuation_in_progress()) {
  75         heap->set_evacuation_in_progress_concurrently(false);
  76       }
  77     } else {
  78       Thread::current()->_ParkEvent->park(10);
  79     }
  80     heap->monitoring_support()->update_counters();
  81 
  82     // Make sure the _do_full_gc flag changes are seen.
  83     OrderAccess::storeload();
  84   }
  85 
  86   // Wait for the actual stop(), can't leave run_service() earlier.
  87   while (! _should_terminate) {
  88     Thread::current()->_ParkEvent->park(10);
  89   }
  90   terminate();
  91 }
  92 
  93 void ShenandoahConcurrentThread::service_normal_cycle() {
  94   if (check_cancellation()) return;
  95 
  96   ShenandoahHeap* heap = ShenandoahHeap::heap();
  97 
  98   GCTimer* gc_timer = heap->gc_timer();
  99   GCTracer* gc_tracer = heap->tracer();
 100 
 101   gc_timer->register_gc_start();
 102   gc_tracer->report_gc_start(GCCause::_no_cause_specified, gc_timer->gc_start());
 103 
 104   heap->shenandoahPolicy()->increase_cycle_counter();
 105 
 106   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 107   TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);
 108 
 109   // Start initial mark under STW:
 110   {
 111     // Workers are setup by VM_ShenandoahInitMark
 112     TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 113     VM_ShenandoahInitMark initMark;
 114     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
 115     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_mark_gross);
 116     VMThread::execute(&initMark);
 117     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_mark_gross);
 118     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
 119   }
 120 
 121   if (check_cancellation()) return;
 122 
 123   // Continue concurrent mark:
 124   {
 125     // Setup workers for concurrent marking phase
 126     FlexibleWorkGang* workers = heap->workers();
 127     uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_marking(workers->active_workers(),
 128                                                                               (uint) Threads::number_of_non_daemon_threads());
 129     ShenandoahWorkerScope scope(workers, n_workers);
 130 
 131     GCTraceTime time("Concurrent marking", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id(), true);
 132     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 133     ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
 134   }
 135 
 136   // Possibly hand over remaining marking work to final-mark phase.
 137   bool clear_full_gc = false;
 138   if (heap->cancelled_concgc()) {
 139     heap->shenandoahPolicy()->record_cm_cancelled();
 140     if (_full_gc_cause == GCCause::_allocation_failure &&
 141         heap->shenandoahPolicy()->handover_cancelled_marking()) {
 142       heap->clear_cancelled_concgc();
 143       clear_full_gc = true;
 144       heap->shenandoahPolicy()->record_cm_degenerated();
 145     } else {
 146       heap->gc_timer()->register_gc_end();
 147       return;
 148     }
 149   } else {
 150     heap->shenandoahPolicy()->record_cm_success();
 151   }
 152 
 153   // Proceed to complete marking under STW, and start evacuation:
 154   {
 155     // Workers are setup by VM_ShenandoahFinalMarkStartEvac


 165   if (check_cancellation()) return;
 166 
 167   // If we handed off remaining marking work above, we need to kick off waiting Java threads
 168   if (clear_full_gc) {
 169     reset_full_gc();
 170   }
 171 
 172   // Continue concurrent evacuation:
 173   {
 174     // Setup workers for concurrent evacuation phase
 175     FlexibleWorkGang* workers = heap->workers();
 176     uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(workers->active_workers(),
 177                                                                                  (uint) Threads::number_of_non_daemon_threads());
 178     ShenandoahWorkerScope scope(workers, n_workers);
 179 
 180     GCTraceTime time("Concurrent evacuation", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id(), true);
 181     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 182     heap->do_evacuation();
 183   }
 184 
 185   // Prepare for the next normal cycle:
 186   if (check_cancellation()) return;
 187 
 188   {
 189     GCTraceTime time("Concurrent reset bitmaps", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id());
 190     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps);
 191     FlexibleWorkGang* workers = heap->workers();
 192     ShenandoahPushWorkerScope scope(workers, heap->max_workers());
 193     heap->reset_next_mark_bitmap(workers);
 194     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps);
 195   }
 196 
 197   gc_timer->register_gc_end();
 198   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
 199 }
 200 
 201 bool ShenandoahConcurrentThread::check_cancellation() {
 202   ShenandoahHeap* heap = ShenandoahHeap::heap();
 203   if (heap->cancelled_concgc()) {
 204     assert (is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
 205     heap->gc_timer()->register_gc_end();
 206     return true;
 207   }
 208   return false;
 209 }
 210 
 211 
 212 void ShenandoahConcurrentThread::stop() {
 213   {
 214     MutexLockerEx ml(Terminator_lock);
 215     _should_terminate = true;




  57   // Wait until we have the surrogate locker thread in place.
  58   {
  59     MutexLockerEx x(CGC_lock, true);
  60     while(_slt == NULL && !_should_terminate) {
  61       CGC_lock->wait(true, 200);
  62     }
  63   }
  64 
  65   ShenandoahHeap* heap = ShenandoahHeap::heap();
  66 
  67   while (! _should_terminate) {
  68     if (in_graceful_shutdown()) {
  69       break;
  70     } else if (is_full_gc()) {
  71       service_fullgc_cycle();
  72     } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity())) {
  73       service_normal_cycle();
  74       if (heap->is_evacuation_in_progress()) {
  75         heap->set_evacuation_in_progress_concurrently(false);
  76       }
  77       if (heap->is_update_refs_in_progress()) {
  78         heap->set_update_refs_in_progress(false);
  79       }
  80     } else {
  81       Thread::current()->_ParkEvent->park(10);
  82     }
  83     heap->monitoring_support()->update_counters();
  84 
  85     // Make sure the _do_full_gc flag changes are seen.
  86     OrderAccess::storeload();
  87   }
  88 
  89   // Wait for the actual stop(), can't leave run_service() earlier.
  90   while (! _should_terminate) {
  91     Thread::current()->_ParkEvent->park(10);
  92   }
  93   terminate();
  94 }
  95 
  96 void ShenandoahConcurrentThread::service_normal_cycle() {
  97   if (check_cancellation()) return;
  98 
  99   ShenandoahHeap* heap = ShenandoahHeap::heap();
 100 
 101   GCTimer* gc_timer = heap->gc_timer();
 102   GCTracer* gc_tracer = heap->tracer();
 103 
 104   gc_timer->register_gc_start();
 105   gc_tracer->report_gc_start(GCCause::_no_cause_specified, gc_timer->gc_start());
 106 
 107   // Cycle started
 108   heap->shenandoahPolicy()->record_cycle_start();
 109 
 110   // Capture peak occupancy right after starting the cycle
 111   heap->shenandoahPolicy()->record_peak_occupancy();
 112 
 113   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 114   TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);
 115 
 116   // Start initial mark under STW:
 117   {
 118     // Workers are setup by VM_ShenandoahInitMark
 119     TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 120     VM_ShenandoahInitMark initMark;
 121     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
 122     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_mark_gross);
 123     VMThread::execute(&initMark);
 124     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_mark_gross);
 125     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
 126   }
 127 
 128   if (check_cancellation()) return;
 129 
 130   // Continue concurrent mark:
 131   {
 132     // Setup workers for concurrent marking phase
 133     FlexibleWorkGang* workers = heap->workers();
 134     uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_marking(workers->active_workers(),
 135                                                                               (uint) Threads::number_of_non_daemon_threads());
 136     ShenandoahWorkerScope scope(workers, n_workers);
 137 
 138     GCTraceTime time("Concurrent marking", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id(), true);
 139     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 140     ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
 141   }
 142 
 143   // Allocations happen during concurrent mark, record peak after the phase:
 144   heap->shenandoahPolicy()->record_peak_occupancy();
 145 
 146   // Possibly hand over remaining marking work to final-mark phase.
 147   bool clear_full_gc = false;
 148   if (heap->cancelled_concgc()) {
 149     heap->shenandoahPolicy()->record_cm_cancelled();
 150     if (_full_gc_cause == GCCause::_allocation_failure &&
 151         heap->shenandoahPolicy()->handover_cancelled_marking()) {
 152       heap->clear_cancelled_concgc();
 153       clear_full_gc = true;
 154       heap->shenandoahPolicy()->record_cm_degenerated();
 155     } else {
 156       heap->gc_timer()->register_gc_end();
 157       return;
 158     }
 159   } else {
 160     heap->shenandoahPolicy()->record_cm_success();
 161   }
 162 
 163   // Proceed to complete marking under STW, and start evacuation:
 164   {
 165     // Workers are setup by VM_ShenandoahFinalMarkStartEvac


 175   if (check_cancellation()) return;
 176 
 177   // If we handed off remaining marking work above, we need to kick off waiting Java threads
 178   if (clear_full_gc) {
 179     reset_full_gc();
 180   }
 181 
 182   // Continue concurrent evacuation:
 183   {
 184     // Setup workers for concurrent evacuation phase
 185     FlexibleWorkGang* workers = heap->workers();
 186     uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(workers->active_workers(),
 187                                                                                  (uint) Threads::number_of_non_daemon_threads());
 188     ShenandoahWorkerScope scope(workers, n_workers);
 189 
 190     GCTraceTime time("Concurrent evacuation", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id(), true);
 191     TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 192     heap->do_evacuation();
 193   }
 194 
 195   // Allocations happen during evacuation, record peak after the phase:
 196   heap->shenandoahPolicy()->record_peak_occupancy();
 197 
 198   // Do an update-refs phase if required.
 199   if (check_cancellation()) return;
 200 
 201   if (heap->shenandoahPolicy()->should_start_update_refs()) {
 202 
 203     VM_ShenandoahInitUpdateRefs init_update_refs;
 204     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
 205     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_update_refs_gross);
 206     VMThread::execute(&init_update_refs);
 207     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_update_refs_gross);
 208     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
 209 
 210     {
 211       GCTraceTime time("Concurrent update references ", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id(), true);
 212       heap->concurrent_update_heap_references();
 213     }
 214 
 215     // Allocations happen during update-refs, record peak after the phase:
 216     heap->shenandoahPolicy()->record_peak_occupancy();
 217 
 218     clear_full_gc = false;
 219     if (heap->cancelled_concgc()) {
 220       heap->shenandoahPolicy()->record_uprefs_cancelled();
 221       if (_full_gc_cause == GCCause::_allocation_failure &&
 222           heap->shenandoahPolicy()->handover_cancelled_uprefs()) {
 223         clear_full_gc = true;
 224         heap->shenandoahPolicy()->record_uprefs_degenerated();
 225       } else {
 226         heap->gc_timer()->register_gc_end();
 227         return;
 228       }
 229     } else {
 230       heap->shenandoahPolicy()->record_uprefs_success();
 231     }
 232 
 233     VM_ShenandoahFinalUpdateRefs final_update_refs;
 234 
 235     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
 236     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_update_refs_gross);
 237     VMThread::execute(&final_update_refs);
 238     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_update_refs_gross);
 239     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
 240   }
 241 
 242   // Prepare for the next normal cycle:
 243   if (check_cancellation()) return;
 244 
 245   if (clear_full_gc) {
 246     reset_full_gc();
 247   }
 248 
 249   {
 250     GCTraceTime time("Concurrent reset bitmaps", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id());
 251     heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps);
 252     FlexibleWorkGang* workers = heap->workers();
 253     ShenandoahPushWorkerScope scope(workers, heap->max_workers());
 254     heap->reset_next_mark_bitmap(workers);
 255     heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps);
 256   }
 257 
 258   // Allocations happen during bitmap cleanup, record peak after the phase:
 259   heap->shenandoahPolicy()->record_peak_occupancy();
 260 
 261   // Cycle is complete
 262   heap->shenandoahPolicy()->record_cycle_end();
 263 
 264   gc_timer->register_gc_end();
 265   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
 266 }
 267 
 268 bool ShenandoahConcurrentThread::check_cancellation() {
 269   ShenandoahHeap* heap = ShenandoahHeap::heap();
 270   if (heap->cancelled_concgc()) {
 271     assert (is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
 272     heap->gc_timer()->register_gc_end();
 273     return true;
 274   }
 275   return false;
 276 }
 277 
 278 
 279 void ShenandoahConcurrentThread::stop() {
 280   {
 281     MutexLockerEx ml(Terminator_lock);
 282     _should_terminate = true;
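
Both concurrent phases (marking and update-references) share the same cancellation pattern in the new version: when the cycle is cancelled by an allocation failure and the policy agrees to hand the remaining work over to the matching stop-the-world phase, the cycle degenerates rather than bailing out to a full GC. A compact sketch of that decision follows, with simplified stand-in names rather than the real policy hooks.

// Sketch only: mirrors the handover decision made after concurrent marking
// (handover_cancelled_marking) and concurrent update-refs (handover_cancelled_uprefs).
#include <cstdio>

enum class GCCause { none, allocation_failure };

struct FakePolicy {
  bool handover_allowed = true;  // stand-in for the policy's handover heuristics
};

// Returns true when the remaining work can be finished in the STW phase
// (a degenerated cycle); false means the cycle ends and a full GC follows.
static bool try_degenerate(GCCause cause, const FakePolicy& policy) {
  return cause == GCCause::allocation_failure && policy.handover_allowed;
}

int main() {
  FakePolicy policy;
  if (try_degenerate(GCCause::allocation_failure, policy)) {
    std::puts("degenerated: finish remaining work in the STW pause");
  } else {
    std::puts("cancelled: register GC end and fall back to full GC");
  }
  return 0;
}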

