src/share/vm/gc/shenandoah/shenandoahConcurrentMark.cpp

Old version:

 224     } else {
 225       ShenandoahMarkObjsClosure<ShenandoahMarkRefsClosure, true> cl(q, rp);
 226       _cm->concurrent_mark_loop(&cl, worker_id, q,  _terminator);
 227     }
 228   }
 229 };
 230 
 231 class SCMFinalMarkingTask : public AbstractGangTask {
 232 private:
 233   ShenandoahConcurrentMark* _cm;
 234   ParallelTaskTerminator* _terminator;
 235   bool _update_refs;
 236   bool _count_live;
 237 
 238 public:
 239   SCMFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live) :
 240     AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live) {
 241   }
 242 
 243   void work(uint worker_id) {
 244     ReferenceProcessor* rp;
 245     if (_cm->process_references()) {
 246       rp = ShenandoahHeap::heap()->ref_processor();
 247     } else {
 248       rp = NULL;
 249     }
 250     SCMObjToScanQueue* q = _cm->get_queue(worker_id);
 251 
 252     // Templates need constexprs, so we have to switch by the flags ourselves.
 253     if (_update_refs) {
 254       if (_count_live) {
 255         ShenandoahMarkObjsClosure<ShenandoahMarkUpdateRefsClosure, true> cl(q, rp);
 256         _cm->final_mark_loop(&cl, worker_id, q, _terminator);
 257       } else {
 258         ShenandoahMarkObjsClosure<ShenandoahMarkUpdateRefsClosure, false> cl(q, rp);
 259         _cm->final_mark_loop(&cl, worker_id, q, _terminator);
 260       }
 261     } else {
 262       if (_count_live) {
 263         ShenandoahMarkObjsClosure<ShenandoahMarkRefsClosure, true> cl(q, rp);
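
The comment above ("Templates need constexprs, so we have to switch by the flags ourselves") refers to the usual trick of mapping runtime booleans onto compile-time template parameters: branch once on the flags and instantiate a separately specialized closure in each arm, so the per-object checks are resolved by the compiler. A minimal, self-contained sketch of that pattern follows; the types and flags are hypothetical stand-ins, not the actual Shenandoah closures.

#include <cstdio>

// Hypothetical closure: UPDATE_REFS and COUNT_LIVE are compile-time flags,
// so the branches inside do_object() are folded away by the compiler.
template <bool UPDATE_REFS, bool COUNT_LIVE>
struct MarkClosure {
  void do_object(int obj) {
    if (UPDATE_REFS) { /* would update the reference in place */ }
    if (COUNT_LIVE)  { /* would accumulate per-region liveness */ }
    std::printf("marked %d (update_refs=%d, count_live=%d)\n",
                obj, (int)UPDATE_REFS, (int)COUNT_LIVE);
  }
};

template <typename Closure>
void mark_loop(Closure& cl) {
  for (int obj = 0; obj < 3; obj++) cl.do_object(obj);
}

// Runtime flags are mapped onto the four instantiations by hand, mirroring
// the nested if/else in SCMFinalMarkingTask::work().
void dispatch(bool update_refs, bool count_live) {
  if (update_refs) {
    if (count_live) { MarkClosure<true,  true>  cl; mark_loop(cl); }
    else            { MarkClosure<true,  false> cl; mark_loop(cl); }
  } else {
    if (count_live) { MarkClosure<false, true>  cl; mark_loop(cl); }
    else            { MarkClosure<false, false> cl; mark_loop(cl); }
  }
}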


 375   if (UseShenandoahOWST) {
 376     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 377     SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
 378     sh->conc_workers()->run_task(&markingTask, nworkers);
 379   } else {
 380     ParallelTaskTerminator terminator(nworkers, task_queues());
 381     SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
 382     sh->conc_workers()->run_task(&markingTask, nworkers);
 383   }
 384 
 385   assert(task_queues()->is_empty(), "Should be empty");
 386   if (! sh->cancelled_concgc()) {
 387     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
 388   }
 389 
 390   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 391 
 392   sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
 393 }
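
The driver above picks between ShenandoahTaskTerminator (when UseShenandoahOWST is enabled) and the stock ParallelTaskTerminator, and duplicates the task setup in both branches because the stack-allocated terminator must outlive the task while its concrete type is only known at run time. A rough sketch of that shape, using hypothetical stand-in classes rather than the HotSpot ones:

#include <cstdio>

// Stand-ins for ParallelTaskTerminator / ShenandoahTaskTerminator
// (OWST = optimized work-stealing termination); not the HotSpot classes.
struct BasicTerminator {
  virtual ~BasicTerminator() {}
  virtual const char* name() const { return "parallel"; }
};
struct OwstTerminator : BasicTerminator {
  const char* name() const { return "owst"; }
};

struct MarkingTask {
  BasicTerminator* _terminator;
  explicit MarkingTask(BasicTerminator* t) : _terminator(t) {}
  void run(unsigned nworkers) {
    std::printf("marking with %u workers, %s termination\n",
                nworkers, _terminator->name());
  }
};

// The terminator lives on the stack for the duration of the task, so each
// branch constructs its own terminator/task pair before running it.
void run_marking(bool use_owst, unsigned nworkers) {
  if (use_owst) {
    OwstTerminator terminator;
    MarkingTask task(&terminator);
    task.run(nworkers);
  } else {
    BasicTerminator terminator;
    MarkingTask task(&terminator);
    task.run(nworkers);
  }
}

In the real code the terminator is also constructed with the task queue set, so that idle workers can consult the queues (and keep stealing) while deciding whether global termination has been reached.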
 394 
 395 class FinishDrainSATBBuffersTask : public AbstractGangTask {
 396 private:
 397   ShenandoahConcurrentMark* _cm;
 398   ParallelTaskTerminator* _terminator;
 399 public:
 400   FinishDrainSATBBuffersTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator) :
 401     AbstractGangTask("Finish draining SATB buffers"), _cm(cm), _terminator(terminator) {
 402   }
 403 
 404   void work(uint worker_id) {
 405     _cm->drain_satb_buffers(worker_id, true);
 406   }
 407 };
 408 
 409 void ShenandoahConcurrentMark::finish_mark_from_roots() {
 410   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
 411 
 412   IsGCActiveMark is_active;
 413 
 414   ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();
 415 
 416   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 417 
 418   uint nworkers = sh->max_parallel_workers();
 419   task_queues()->reserve(nworkers);
 420 
 421   sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::drain_satb);
 422   {
 423     StrongRootsScope scope(nworkers);
 424     if (UseShenandoahOWST) {
 425       ShenandoahTaskTerminator terminator(nworkers, task_queues());
 426       // drain_satb_buffers(0, true);
 427       FinishDrainSATBBuffersTask drain_satb_buffers(this, &terminator);
 428       sh->workers()->run_task(&drain_satb_buffers, nworkers);
 429     } else {
 430       ParallelTaskTerminator terminator(nworkers, task_queues());
 431       // drain_satb_buffers(0, true);
 432       FinishDrainSATBBuffersTask drain_satb_buffers(this, &terminator);
 433       sh->workers()->run_task(&drain_satb_buffers, nworkers);
 434     }
 435     sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::drain_satb);
 436   }
 437 
 438   shared_finish_mark_from_roots(/* full_gc = */ false);
 439 
 440   sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::update_roots);
 441   if (sh->need_update_refs()) {
 442     final_update_roots();
 443   }
 444   sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::update_roots);
 445 
 446   TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
 447 
 448 #ifdef ASSERT
 449   verify_roots();
 450 
 451   if (ShenandoahDumpHeapAfterConcurrentMark) {
 452     sh->ensure_parsability(false);
 453     sh->print_all_refs("post-mark");
 454   }
 455 #endif
 456 }
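
Taken together, finish_mark_from_roots() above and shared_finish_mark_from_roots() below walk through the final-mark phases in a fixed order: drain the SATB buffers, drain the remaining mark queues, then process weak references, unload classes, and (outside of full GC, when references need updating) update the roots. A condensed outline of that ordering follows; the helpers are placeholders that merely stand in for the parallel tasks, each of which is also bracketed by record_phase_start()/record_phase_end() in the real code.

#include <cstdio>

// Placeholder helpers standing in for the actual worker gang tasks.
static void drain_satb_buffers_all_workers() { std::puts("drain SATB buffers"); }
static void drain_mark_queues_all_workers()  { std::puts("drain mark queues"); }
static void process_weak_references()        { std::puts("process weak refs"); }
static void unload_dead_classes()            { std::puts("unload classes"); }
static void update_roots()                   { std::puts("update roots"); }

// Condensed ordering of the final-mark safepoint work.
void final_mark_outline(bool full_gc, bool need_update_refs,
                        bool process_refs, bool unload_classes) {
  drain_satb_buffers_all_workers();          // barrier buffers -> mark queues
  drain_mark_queues_all_workers();           // finish marking queued objects
  if (process_refs)   process_weak_references();
  if (unload_classes) unload_dead_classes();
  if (!full_gc && need_update_refs) update_roots();
}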
 457 
 458 void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
 459   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
 460 
 461   ShenandoahHeap* sh = ShenandoahHeap::heap();
 462   ShenandoahCollectorPolicy* policy = sh->shenandoahPolicy();
 463 
 464   uint nworkers = sh->max_parallel_workers();
 465   // Finally mark everything else we've got in our queues during the previous steps.
 466   {
 467     policy->record_phase_start(full_gc ?
 468                                ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
 469                                ShenandoahCollectorPolicy::drain_queues);
 470     bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
 471     task_queues()->reserve(nworkers);
 472 

 473     if (UseShenandoahOWST) {
 474       ShenandoahTaskTerminator terminator(nworkers, task_queues());
 475       SCMFinalMarkingTask markingTask = SCMFinalMarkingTask(this, &terminator, sh->need_update_refs(), count_live);
 476       sh->workers()->run_task(&markingTask);
 477     } else {
 478       ParallelTaskTerminator terminator(nworkers, task_queues());
 479       SCMFinalMarkingTask markingTask = SCMFinalMarkingTask(this, &terminator, sh->need_update_refs(), count_live);
 480       sh->workers()->run_task(&markingTask);
 481     }
 482     policy->record_phase_end(full_gc ?
 483                              ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
 484                              ShenandoahCollectorPolicy::drain_queues);
 485   }
 486 
 487   assert(task_queues()->is_empty(), "Should be empty");
 488 
 489   // When we're done marking everything, we process weak references.
 490   policy->record_phase_start(full_gc ?
 491                              ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
 492                              ShenandoahCollectorPolicy::weakrefs);
 493   if (process_references()) {
 494     weak_refs_work();
 495   }
 496   policy->record_phase_end(full_gc ?
 497                            ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
 498                            ShenandoahCollectorPolicy::weakrefs);
 499 
 500   // And finally finish class unloading
 501   policy->record_phase_start(full_gc ?
 502                              ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
 503                              ShenandoahCollectorPolicy::class_unloading);
 504   if (unload_classes()) {

New version:

 224     } else {
 225       ShenandoahMarkObjsClosure<ShenandoahMarkRefsClosure, true> cl(q, rp);
 226       _cm->concurrent_mark_loop(&cl, worker_id, q,  _terminator);
 227     }
 228   }
 229 };
 230 
 231 class SCMFinalMarkingTask : public AbstractGangTask {
 232 private:
 233   ShenandoahConcurrentMark* _cm;
 234   ParallelTaskTerminator* _terminator;
 235   bool _update_refs;
 236   bool _count_live;
 237 
 238 public:
 239   SCMFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live) :
 240     AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live) {
 241   }
 242 
 243   void work(uint worker_id) {
 244     // First drain remaining SATB buffers.
 245     // Notice that this is not strictly necessary for mark-compact. But since
 246     // it requires a StrongRootsScope around the task, we need to claim the
 247     // threads, and performance-wise it doesn't really matter. Adds about 1ms to
 248     // full-gc.
 249     _cm->drain_satb_buffers(worker_id, true);
 250 
 251     ReferenceProcessor* rp;
 252     if (_cm->process_references()) {
 253       rp = ShenandoahHeap::heap()->ref_processor();
 254     } else {
 255       rp = NULL;
 256     }
 257     SCMObjToScanQueue* q = _cm->get_queue(worker_id);
 258 
 259     // Templates need constexprs, so we have to switch by the flags ourselves.
 260     if (_update_refs) {
 261       if (_count_live) {
 262         ShenandoahMarkObjsClosure<ShenandoahMarkUpdateRefsClosure, true> cl(q, rp);
 263         _cm->final_mark_loop(&cl, worker_id, q, _terminator);
 264       } else {
 265         ShenandoahMarkObjsClosure<ShenandoahMarkUpdateRefsClosure, false> cl(q, rp);
 266         _cm->final_mark_loop(&cl, worker_id, q, _terminator);
 267       }
 268     } else {
 269       if (_count_live) {
 270         ShenandoahMarkObjsClosure<ShenandoahMarkRefsClosure, true> cl(q, rp);
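
The comment at the top of work() above explains why the remaining SATB buffers are now drained here rather than in a separate pass. Conceptually, draining means flushing every thread-local snapshot-at-the-beginning buffer into the worker's mark queue so that objects recorded by the write barrier still get marked before termination. A simplified sketch of that idea, with made-up container types rather than the HotSpot SATB queue classes:

#include <vector>

struct Oop;  // opaque stand-in for an object reference

// Made-up stand-in for a thread-local SATB buffer filled by the write barrier.
struct PerThreadSATBBuffer {
  std::vector<Oop*> entries;
};

// Made-up stand-in for a worker's SCMObjToScanQueue.
struct MarkQueue {
  std::vector<Oop*> tasks;
  void push(Oop* obj) { tasks.push_back(obj); }
};

// Flush every thread's SATB buffer into the mark queue; the real code would
// also test-and-set the mark bitmap before pushing, and claims threads so
// each buffer is drained by exactly one worker.
void drain_satb_buffers_sketch(std::vector<PerThreadSATBBuffer>& buffers,
                               MarkQueue& q) {
  for (PerThreadSATBBuffer& buf : buffers) {
    for (Oop* obj : buf.entries) {
      q.push(obj);
    }
    buf.entries.clear();
  }
}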


 382   if (UseShenandoahOWST) {
 383     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 384     SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
 385     sh->conc_workers()->run_task(&markingTask, nworkers);
 386   } else {
 387     ParallelTaskTerminator terminator(nworkers, task_queues());
 388     SCMConcurrentMarkingTask markingTask = SCMConcurrentMarkingTask(this, &terminator, update_refs);
 389     sh->conc_workers()->run_task(&markingTask, nworkers);
 390   }
 391 
 392   assert(task_queues()->is_empty(), "Should be empty");
 393   if (! sh->cancelled_concgc()) {
 394     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
 395   }
 396 
 397   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 398 
 399   sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::conc_mark);
 400 }
 401 
 402 void ShenandoahConcurrentMark::finish_mark_from_roots() {
 403   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
 404 
 405   IsGCActiveMark is_active;
 406 
 407   ShenandoahHeap* sh = (ShenandoahHeap *) Universe::heap();
 408 
 409   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 410 
 411   shared_finish_mark_from_roots(/* full_gc = */ false);
 412 
 413   sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::update_roots);
 414   if (sh->need_update_refs()) {
 415     final_update_roots();
 416   }
 417   sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::update_roots);
 418 
 419   TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
 420 
 421 #ifdef ASSERT
 422   verify_roots();
 423 
 424   if (ShenandoahDumpHeapAfterConcurrentMark) {
 425     sh->ensure_parsability(false);
 426     sh->print_all_refs("post-mark");
 427   }
 428 #endif
 429 }
 430 
 431 void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
 432   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
 433 
 434   ShenandoahHeap* sh = ShenandoahHeap::heap();
 435   ShenandoahCollectorPolicy* policy = sh->shenandoahPolicy();
 436 
 437   uint nworkers = sh->max_parallel_workers();
 438   // Finally mark everything else we've got in our queues during the previous steps.
 439   {
 440     policy->record_phase_start(full_gc ?
 441                                ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
 442                                ShenandoahCollectorPolicy::drain_satb);
 443     bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
 444     task_queues()->reserve(nworkers);
 445 
 446     StrongRootsScope scope(nworkers);
 447     if (UseShenandoahOWST) {
 448       ShenandoahTaskTerminator terminator(nworkers, task_queues());
 449       SCMFinalMarkingTask markingTask = SCMFinalMarkingTask(this, &terminator, sh->need_update_refs(), count_live);
 450       sh->workers()->run_task(&markingTask);
 451     } else {
 452       ParallelTaskTerminator terminator(nworkers, task_queues());
 453       SCMFinalMarkingTask markingTask = SCMFinalMarkingTask(this, &terminator, sh->need_update_refs(), count_live);
 454       sh->workers()->run_task(&markingTask);
 455     }
 456     policy->record_phase_end(full_gc ?
 457                              ShenandoahCollectorPolicy::full_gc_mark_drain_queues :
 458                              ShenandoahCollectorPolicy::drain_satb);
 459   }
 460 
 461   assert(task_queues()->is_empty(), "Should be empty");
 462 
 463   // When we're done marking everything, we process weak references.
 464   policy->record_phase_start(full_gc ?
 465                              ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
 466                              ShenandoahCollectorPolicy::weakrefs);
 467   if (process_references()) {
 468     weak_refs_work();
 469   }
 470   policy->record_phase_end(full_gc ?
 471                            ShenandoahCollectorPolicy::full_gc_mark_weakrefs :
 472                            ShenandoahCollectorPolicy::weakrefs);
 473 
 474   // And finally finish class unloading
 475   policy->record_phase_start(full_gc ?
 476                              ShenandoahCollectorPolicy::full_gc_mark_class_unloading :
 477                              ShenandoahCollectorPolicy::class_unloading);
 478   if (unload_classes()) {

