
src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp

rev 14453 : Remove secondary marking bitmap.
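
Before diving into the two hunks, it may help to see the shape of the change at a glance. The sketch below is a stubbed, compilable summary written for this review (print-only stand-ins, not HotSpot code): the end-of-cycle reset of the secondary ("next") marking bitmap disappears, and the single remaining bitmap is instead cleared concurrently before the init-mark pause.

// Stubbed sketch of how this patch reorders the cycle. The function names
// echo the phases visible in the hunks below, but these are print-only
// stand-ins, not HotSpot code.
#include <cstdio>

static void init_mark()         { std::puts("init mark (STW pause; resets TAMS)"); }
static void concurrent_mark()   { std::puts("concurrent mark"); }
static void final_update_refs() { std::puts("final update refs (STW pause)"); }
static void recycle_trash()     { std::puts("concurrent cleanup: recycle trash"); }

// Old shape: two marking bitmaps; the "next" bitmap is cleared at the end of
// the cycle, in the same concurrent-cleanup phase as trash recycling.
static void cycle_before_patch() {
  init_mark();
  concurrent_mark();
  final_update_refs();
  recycle_trash();
  std::puts("concurrent cleanup: reset *next* mark bitmap");
}

// New shape: a single bitmap, cleared concurrently *before* the init-mark
// pause (hence the TAMS comment in the new code); in the hunk shown, trash
// recycling now runs right after final update refs.
static void cycle_after_patch() {
  std::puts("concurrent cleanup: reset mark bitmap");
  init_mark();
  concurrent_mark();
  final_update_refs();
  recycle_trash();
}

int main() {
  std::puts("-- before --");
  cycle_before_patch();
  std::puts("-- after --");
  cycle_after_patch();
  return 0;
}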

Old version (before this change):
 159 
 160 void ShenandoahConcurrentThread::service_normal_cycle() {
 161   if (check_cancellation()) return;
 162 
 163   ShenandoahHeap* heap = ShenandoahHeap::heap();
 164 
 165   GCTimer* gc_timer = heap->gc_timer();
 166 
 167   ShenandoahGCSession session;
 168 
 169   // Cycle started
 170   heap->shenandoahPolicy()->record_cycle_start();
 171 
 172   // Capture peak occupancy right after starting the cycle
 173   heap->shenandoahPolicy()->record_peak_occupancy();
 174 
 175   GCIdMark gc_id_mark;
 176   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 177   TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);
 178 
 179   // Start initial mark under STW:
 180   {
 181     // Workers are setup by VM_ShenandoahInitMark
 182     TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 183     ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
 184     ShenandoahGCPhase init_mark_phase(ShenandoahPhaseTimings::init_mark_gross);
 185     VM_ShenandoahInitMark initMark;
 186     VMThread::execute(&initMark);
 187   }
 188 
 189   if (check_cancellation()) return;
 190 
 191   // Continue concurrent mark:
 192   {
 193     // Setup workers for concurrent marking phase
 194     WorkGang* workers = heap->workers();
 195     uint n_workers = ShenandoahWorkerPolicy::calc_workers_for_conc_marking();
 196     ShenandoahWorkerScope scope(workers, n_workers);
 197 
 198     GCTraceTime(Info, gc) time("Concurrent marking", gc_timer, GCCause::_no_gc, true);


 309     if (heap->cancelled_concgc()) {
 310       heap->shenandoahPolicy()->record_uprefs_cancelled();
 311       if (_full_gc_cause == GCCause::_allocation_failure &&
 312           heap->shenandoahPolicy()->handover_cancelled_uprefs()) {
 313         clear_full_gc = true;
 314         heap->shenandoahPolicy()->record_uprefs_degenerated();
 315       } else {
 316         return;
 317       }
 318     } else {
 319       heap->shenandoahPolicy()->record_uprefs_success();
 320     }
 321 
 322     if (do_it) {
 323       TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 324       ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
 325       ShenandoahGCPhase final_update_refs_phase(ShenandoahPhaseTimings::final_update_refs_gross);
 326       VM_ShenandoahFinalUpdateRefs final_update_refs;
 327       VMThread::execute(&final_update_refs);
 328     }
 329   } else {
 330     // If update-refs were skipped, need to do another verification pass after evacuation.
 331     if (ShenandoahVerify && !check_cancellation()) {
 332       VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
 333       VMThread::execute(&verify_after_evacuation);
 334     }
 335   }
 336 
 337   // Prepare for the next normal cycle:
 338   if (check_cancellation()) return;
 339 
 340   if (clear_full_gc) {
 341     reset_full_gc();
 342   }
 343 
 344   {
 345     GCTraceTime(Info, gc) time("Concurrent cleanup", gc_timer, GCCause::_no_gc, true);
 346     ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
 347 
 348     {
 349       ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
 350       heap->recycle_trash();
 351     }
 352 
 353     {
 354       ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
 355       WorkGang *workers = heap->workers();
 356       ShenandoahPushWorkerScope scope(workers, ConcGCThreads);
 357       heap->reset_next_mark_bitmap(workers);
 358     }
 359   }
 360 
 361   // Allocations happen during bitmap cleanup, record peak after the phase:
 362   heap->shenandoahPolicy()->record_peak_occupancy();
 363 
 364   // Cycle is complete
 365   heap->shenandoahPolicy()->record_cycle_end();
 366 
 367   // TODO: Call this properly with Shenandoah*CycleMark
 368   heap->set_used_at_last_gc();
 369 }
 370 
 371 bool ShenandoahConcurrentThread::check_cancellation() {
 372   ShenandoahHeap* heap = ShenandoahHeap::heap();
 373   if (heap->cancelled_concgc()) {
 374     assert (is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
 375     return true;
 376   }
 377   return false;
 378 }
 379 
 380 
 381 void ShenandoahConcurrentThread::stop_service() {
 382   // Nothing to do here.
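
One reading note that applies to both the old version above and the new version below: helpers such as GCTraceTime, ShenandoahGCPhase and ShenandoahWorkerScope are scope-based, so constructing the object opens a phase (timer, counters, worker setup) and the destructor at the closing brace ends it. That is why the function is full of blocks that exist only to bound a phase. A minimal standalone analogue of the timing part (not the HotSpot classes):

// Minimal RAII phase timer, for illustration only. The real GCTraceTime /
// ShenandoahGCPhase helpers also feed unified logging and
// ShenandoahPhaseTimings, which this sketch omits.
#include <chrono>
#include <cstdio>

class PhaseTimer {
  const char* _name;
  std::chrono::steady_clock::time_point _start;
public:
  explicit PhaseTimer(const char* name)
    : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~PhaseTimer() {
    long long us = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - _start).count();
    std::printf("%s: %lld us\n", _name, us);
  }
};

int main() {
  PhaseTimer total("total cycle");
  {
    // Nested scopes time sub-phases, mirroring the nested blocks in
    // service_normal_cycle().
    PhaseTimer cleanup("concurrent cleanup");
    PhaseTimer reset("reset bitmaps");
  } // sub-phase timers report here, at the closing brace
  return 0;
}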

New version (with this change):
 159 
 160 void ShenandoahConcurrentThread::service_normal_cycle() {
 161   if (check_cancellation()) return;
 162 
 163   ShenandoahHeap* heap = ShenandoahHeap::heap();
 164 
 165   GCTimer* gc_timer = heap->gc_timer();
 166 
 167   ShenandoahGCSession session;
 168 
 169   // Cycle started
 170   heap->shenandoahPolicy()->record_cycle_start();
 171 
 172   // Capture peak occupancy right after starting the cycle
 173   heap->shenandoahPolicy()->record_peak_occupancy();
 174 
 175   GCIdMark gc_id_mark;
 176   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 177   TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);
 178 
 179   // Mark requires clean bitmaps. Clear them here, before diving into STW.
 180   // There is a potential race from this moment on to TAMS reset in init mark: the bitmaps
 181   // would be clear, but TAMS not yet updated.
 182   {
 183     GCTraceTime(Info, gc) time("Concurrent cleanup", gc_timer, GCCause::_no_gc, true);
 184     ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
 185     ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
 186     WorkGang *workers = heap->workers();
 187     ShenandoahPushWorkerScope scope(workers, ConcGCThreads);
 188     heap->reset_mark_bitmap(workers);
 189   }
 190 
 191   // Start initial mark under STW:
 192   {
 193     // Workers are setup by VM_ShenandoahInitMark
 194     TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 195     ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
 196     ShenandoahGCPhase init_mark_phase(ShenandoahPhaseTimings::init_mark_gross);
 197     VM_ShenandoahInitMark initMark;
 198     VMThread::execute(&initMark);
 199   }
 200 
 201   if (check_cancellation()) return;
 202 
 203   // Continue concurrent mark:
 204   {
 205     // Setup workers for concurrent marking phase
 206     WorkGang* workers = heap->workers();
 207     uint n_workers = ShenandoahWorkerPolicy::calc_workers_for_conc_marking();
 208     ShenandoahWorkerScope scope(workers, n_workers);
 209 
 210     GCTraceTime(Info, gc) time("Concurrent marking", gc_timer, GCCause::_no_gc, true);


 321     if (heap->cancelled_concgc()) {
 322       heap->shenandoahPolicy()->record_uprefs_cancelled();
 323       if (_full_gc_cause == GCCause::_allocation_failure &&
 324           heap->shenandoahPolicy()->handover_cancelled_uprefs()) {
 325         clear_full_gc = true;
 326         heap->shenandoahPolicy()->record_uprefs_degenerated();
 327       } else {
 328         return;
 329       }
 330     } else {
 331       heap->shenandoahPolicy()->record_uprefs_success();
 332     }
 333 
 334     if (do_it) {
 335       TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
 336       ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
 337       ShenandoahGCPhase final_update_refs_phase(ShenandoahPhaseTimings::final_update_refs_gross);
 338       VM_ShenandoahFinalUpdateRefs final_update_refs;
 339       VMThread::execute(&final_update_refs);
 340     }
 341 
 342     if (do_it) {
 343       GCTraceTime(Info, gc) time("Concurrent cleanup", gc_timer, GCCause::_no_gc, true);
 344       ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
 345       ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
 346       heap->recycle_trash();
 347     }
 348 
 349     // Allocations happen during bitmap cleanup, record peak after the phase:
 350     heap->shenandoahPolicy()->record_peak_occupancy();
 351 
 352   } else {
 353     // If update-refs were skipped, need to do another verification pass after evacuation.
 354     if (ShenandoahVerify && !check_cancellation()) {
 355       VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
 356       VMThread::execute(&verify_after_evacuation);
 357     }
 358   }
 359 
 360   // Prepare for the next normal cycle:
 361   if (check_cancellation()) return;
 362 
 363   if (clear_full_gc) {
 364     reset_full_gc();
 365   }
 366 
 367   // Cycle is complete
 368   heap->shenandoahPolicy()->record_cycle_end();
 369 
 370   // TODO: Call this properly with Shenandoah*CycleMark
 371   heap->set_used_at_last_gc();
 372 }
 373 
 374 bool ShenandoahConcurrentThread::check_cancellation() {
 375   ShenandoahHeap* heap = ShenandoahHeap::heap();
 376   if (heap->cancelled_concgc()) {
 377     assert (is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
 378     return true;
 379   }
 380   return false;
 381 }
 382 
 383 
 384 void ShenandoahConcurrentThread::stop_service() {
 385   // Nothing to do here.
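
The new comment ahead of init mark warns about a window between the concurrent bitmap clear and the TAMS reset: the bitmap is already empty while TAMS still holds the previous cycle's value. The sketch below (hypothetical types and names, not the ShenandoahHeap API) shows the liveness test that makes this matter: with a cleared bitmap and a stale TAMS, every object below the old TAMS reads as unmarked, so the implication is that nothing in that window may rely on the previous cycle's marking information.

// Hedged sketch of a TAMS-based liveness query, using hypothetical types and
// names rather than the real ShenandoahHeap/ShenandoahHeapRegion API, to
// illustrate the window described in the comment before init mark.
#include <cstdio>

struct Bitmap {
  // Stands in for the marking bitmap right after reset_mark_bitmap(): empty.
  bool is_marked(const void* addr) const { (void)addr; return false; }
};

struct Region {
  const char* _bottom;
  const char* _tams;   // top-at-mark-start left over from the previous cycle
  Bitmap      _marks;

  // Typical TAMS rule: objects at or above TAMS were allocated after marking
  // started and count as implicitly marked; older objects consult the bitmap.
  bool is_marked(const void* obj) const {
    if (static_cast<const char*>(obj) >= _tams) {
      return true;
    }
    return _marks.is_marked(obj);
  }
};

int main() {
  char heap[64] = {};
  Region r{heap, heap + 32, Bitmap{}};

  // In the race window (bitmap already cleared, TAMS not yet reset by init
  // mark), everything below the old TAMS reads as unmarked:
  std::printf("below old TAMS -> %s\n", r.is_marked(heap + 8)  ? "marked" : "unmarked");
  std::printf("at/above TAMS  -> %s\n", r.is_marked(heap + 40) ? "marked" : "unmarked");
  return 0;
}

Init mark then resets TAMS for each region, at which point the single bitmap and TAMS agree again and the window closes.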

