138
139 void work(uint worker_id) {
140 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
141 ShenandoahParallelWorkerSession worker_session(worker_id);
142
143 ShenandoahHeap* heap = ShenandoahHeap::heap();
144 ShenandoahUpdateRefsClosure cl;
145 if (_check_alive) {
146 ShenandoahForwardedIsAliveClosure is_alive;
147 _root_updater->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>(worker_id, &is_alive, &cl);
148 } else {
149 AlwaysTrueClosure always_true;;
150 _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
151 }
152 }
153 };
154
155 class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
156 private:
157 ShenandoahConcurrentMark* _cm;
158 ShenandoahTaskTerminator* _terminator;
159
160 public:
161 ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
162 AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
163 }
164
165 void work(uint worker_id) {
166 ShenandoahHeap* heap = ShenandoahHeap::heap();
167 ShenandoahConcurrentWorkerSession worker_session(worker_id);
168 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
169 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
170 ReferenceProcessor* rp;
171 if (heap->process_references()) {
172 rp = heap->ref_processor();
173 shenandoah_assert_rp_isalive_installed();
174 } else {
175 rp = NULL;
176 }
177
178 _cm->concurrent_scan_code_roots(worker_id, rp);
179 _cm->mark_loop(worker_id, _terminator, rp,
180 true, // cancellable
181 ShenandoahStringDedup::is_enabled()); // perform string dedup
185 class ShenandoahSATBThreadsClosure : public ThreadClosure {
186 private:
187 ShenandoahSATBBufferClosure* _satb_cl;
188 uintx _claim_token;
189
190 public:
191 ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
192 _satb_cl(satb_cl),
193 _claim_token(Threads::thread_claim_token()) {}
194
195 void do_thread(Thread* thread) {
196 if (thread->claim_threads_do(true, _claim_token)) {
197 ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
198 }
199 }
200 };
201
202 class ShenandoahFinalMarkingTask : public AbstractGangTask {
203 private:
204 ShenandoahConcurrentMark* _cm;
205 ShenandoahTaskTerminator* _terminator;
206 bool _dedup_string;
207
208 public:
209 ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
210 AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
211 }
212
213 void work(uint worker_id) {
214 ShenandoahHeap* heap = ShenandoahHeap::heap();
215
216 ShenandoahParallelWorkerSession worker_session(worker_id);
217 // First drain remaining SATB buffers.
218 // Notice that this is not strictly necessary for mark-compact. But since
219 // it requires a StrongRootsScope around the task, we need to claim the
220 // threads, and performance-wise it doesn't really matter. Adds about 1ms to
221 // full-gc.
222 {
223 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
224 ShenandoahSATBBufferClosure cl(q);
225 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
226 while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
227 ShenandoahSATBThreadsClosure tc(&cl);
228 Threads::threads_do(&tc);
229 }
388
389 ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);
390
391 if (_heap->process_references()) {
392 ReferenceProcessor* rp = _heap->ref_processor();
393 rp->set_active_mt_degree(nworkers);
394
395 // enable ("weak") refs discovery
396 rp->enable_discovery(true /*verify_no_refs*/);
397 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
398 }
399
400 shenandoah_assert_rp_isalive_not_installed();
401 ShenandoahIsAliveSelector is_alive;
402 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
403
404 task_queues()->reserve(nworkers);
405
406 {
407 ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
408 ShenandoahTaskTerminator terminator(nworkers, task_queues());
409 ShenandoahConcurrentMarkingTask task(this, &terminator);
410 workers->run_task(&task);
411 }
412
413 assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
414 }
415
// Completes marking at the final-mark (or full-GC mark) safepoint: drains the
// remaining task-queue work (plus SATB buffers), then processes weak
// references if reference processing is enabled.
void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    // Temporarily install the is-alive closure on the reference processor for
    // the duration of the marking task; it must not be installed already.
    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    // StrongRootsScope must span the task so workers can claim threads while
    // draining their SATB buffers.
    StrongRootsScope scope(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}
459
460 // Weak Reference Closures
461 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
462 uint _worker_id;
463 ShenandoahTaskTerminator* _terminator;
464 bool _reset_terminator;
465
466 public:
467 ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
468 _worker_id(worker_id),
469 _terminator(t),
470 _reset_terminator(reset_terminator) {
471 }
472
473 void do_void() {
474 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
475
476 ShenandoahHeap* sh = ShenandoahHeap::heap();
477 ShenandoahConcurrentMark* scm = sh->concurrent_mark();
478 assert(sh->process_references(), "why else would we be here?");
479 ReferenceProcessor* rp = sh->ref_processor();
480
481 shenandoah_assert_rp_isalive_installed();
482
483 scm->mark_loop(_worker_id, _terminator, rp,
484 false, // not cancellable
485 false); // do not do strdedup
486
487 if (_reset_terminator) {
535 class ShenandoahWeakUpdateClosure : public OopClosure {
536 private:
537 ShenandoahHeap* const _heap;
538
539 template <class T>
540 inline void do_oop_work(T* p) {
541 oop o = _heap->maybe_update_with_forwarded(p);
542 shenandoah_assert_marked_except(p, o, o == NULL);
543 }
544
545 public:
546 ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}
547
548 void do_oop(narrowOop* p) { do_oop_work(p); }
549 void do_oop(oop* p) { do_oop_work(p); }
550 };
551
552 class ShenandoahRefProcTaskProxy : public AbstractGangTask {
553 private:
554 AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
555 ShenandoahTaskTerminator* _terminator;
556
557 public:
558 ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
559 ShenandoahTaskTerminator* t) :
560 AbstractGangTask("Process reference objects in parallel"),
561 _proc_task(proc_task),
562 _terminator(t) {
563 }
564
565 void work(uint worker_id) {
566 ResourceMark rm;
567 HandleMark hm;
568 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
569 ShenandoahHeap* heap = ShenandoahHeap::heap();
570 ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
571 if (heap->has_forwarded_objects()) {
572 ShenandoahForwardedIsAliveClosure is_alive;
573 ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
574 _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
575 } else {
576 ShenandoahIsAliveClosure is_alive;
577 ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
578 _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
579 }
583 class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
584 private:
585 WorkGang* _workers;
586
587 public:
588 ShenandoahRefProcTaskExecutor(WorkGang* workers) :
589 _workers(workers) {
590 }
591
592 // Executes a task using worker threads.
593 void execute(ProcessTask& task, uint ergo_workers) {
594 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
595
596 ShenandoahHeap* heap = ShenandoahHeap::heap();
597 ShenandoahConcurrentMark* cm = heap->concurrent_mark();
598 ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
599 ergo_workers,
600 /* do_check = */ false);
601 uint nworkers = _workers->active_workers();
602 cm->task_queues()->reserve(nworkers);
603 ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
604 ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
605 _workers->run_task(&proc_task_proxy);
606 }
607 };
608
609 void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
610 assert(_heap->process_references(), "sanity");
611
612 ShenandoahPhaseTimings::Phase phase_root =
613 full_gc ?
614 ShenandoahPhaseTimings::full_gc_weakrefs :
615 ShenandoahPhaseTimings::weakrefs;
616
617 ShenandoahGCPhase phase(phase_root);
618
619 ReferenceProcessor* rp = _heap->ref_processor();
620
621 // NOTE: We cannot shortcut on has_discovered_references() here, because
622 // we will miss marking JNI Weak refs then, see implementation in
623 // ReferenceProcessor::process_discovered_references.
641 ShenandoahPhaseTimings::full_gc_weakrefs_termination :
642 ShenandoahPhaseTimings::weakrefs_termination;
643
644 shenandoah_assert_rp_isalive_not_installed();
645 ShenandoahIsAliveSelector is_alive;
646 ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
647
648 WorkGang* workers = _heap->workers();
649 uint nworkers = workers->active_workers();
650
651 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
652 rp->set_active_mt_degree(nworkers);
653
654 assert(task_queues()->is_empty(), "Should be empty");
655
656 // complete_gc and keep_alive closures instantiated here are only needed for
657 // single-threaded path in RP. They share the queue 0 for tracking work, which
658 // simplifies implementation. Since RP may decide to call complete_gc several
659 // times, we need to be able to reuse the terminator.
660 uint serial_worker_id = 0;
661 ShenandoahTaskTerminator terminator(1, task_queues());
662 ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
663
664 ShenandoahRefProcTaskExecutor executor(workers);
665
666 ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
667
668 {
669 ShenandoahGCPhase phase(phase_process);
670 ShenandoahTerminationTracker phase_term(phase_process_termination);
671
672 if (_heap->has_forwarded_objects()) {
673 ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
674 rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
675 &complete_gc, &executor,
676 &pt);
677
678 } else {
679 ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
680 rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
681 &complete_gc, &executor,
686 pt.print_all_references();
687
688 assert(task_queues()->is_empty(), "Should be empty");
689 }
690 }
691
692 class ShenandoahCancelledGCYieldClosure : public YieldClosure {
693 private:
694 ShenandoahHeap* const _heap;
695 public:
696 ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
697 virtual bool should_return() { return _heap->cancelled_gc(); }
698 };
699
700 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
701 public:
702 void do_void() {
703 ShenandoahHeap* sh = ShenandoahHeap::heap();
704 ShenandoahConcurrentMark* scm = sh->concurrent_mark();
705 assert(sh->process_references(), "why else would we be here?");
706 ShenandoahTaskTerminator terminator(1, scm->task_queues());
707
708 ReferenceProcessor* rp = sh->ref_processor();
709 shenandoah_assert_rp_isalive_installed();
710
711 scm->mark_loop(0, &terminator, rp,
712 false, // not cancellable
713 false); // do not do strdedup
714 }
715 };
716
717 class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
718 private:
719 ShenandoahObjToScanQueue* _queue;
720 ShenandoahHeap* _heap;
721 ShenandoahMarkingContext* const _mark_context;
722
723 template <class T>
724 inline void do_oop_work(T* p) {
725 ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
726 }
809 workers->run_task(&task);
810
811 assert(task_queues()->is_empty(), "Should be empty");
812 }
813
814 void ShenandoahConcurrentMark::cancel() {
815 // Clean up marking stacks.
816 ShenandoahObjToScanQueueSet* queues = task_queues();
817 queues->clear();
818
819 // Cancel SATB buffers.
820 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
821 }
822
823 ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
824 assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
825 return _task_queues->queue(worker_id);
826 }
827
828 template <bool CANCELLABLE>
829 void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
830 bool strdedup) {
831 ShenandoahObjToScanQueue* q = get_queue(w);
832
833 jushort* ld = _heap->get_liveness_cache(w);
834
835 // TODO: We can clean up this if we figure out how to do templated oop closures that
836 // play nice with specialized_oop_iterators.
837 if (_heap->unload_classes()) {
838 if (_heap->has_forwarded_objects()) {
839 if (strdedup) {
840 ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
841 mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
842 } else {
843 ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
844 mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
845 }
846 } else {
847 if (strdedup) {
848 ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
849 mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
859 mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
860 } else {
861 ShenandoahMarkUpdateRefsClosure cl(q, rp);
862 mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
863 }
864 } else {
865 if (strdedup) {
866 ShenandoahMarkRefsDedupClosure cl(q, rp);
867 mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
868 } else {
869 ShenandoahMarkRefsClosure cl(q, rp);
870 mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
871 }
872 }
873 }
874
875 _heap->flush_liveness_cache(w);
876 }
877
878 template <class T, bool CANCELLABLE>
879 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
880 uintx stride = ShenandoahMarkLoopStride;
881
882 ShenandoahHeap* heap = ShenandoahHeap::heap();
883 ShenandoahObjToScanQueueSet* queues = task_queues();
884 ShenandoahObjToScanQueue* q;
885 ShenandoahMarkTask t;
886
887 /*
888 * Process outstanding queues, if any.
889 *
890 * There can be more queues than workers. To deal with the imbalance, we claim
891 * extra queues first. Since marking can push new tasks into the queue associated
892 * with this worker id, we come back to process this queue in the normal loop.
893 */
894 assert(queues->get_reserved() == heap->workers()->active_workers(),
895 "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());
896
897 q = queues->claim_next();
898 while (q != NULL) {
899 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
|
138
139 void work(uint worker_id) {
140 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
141 ShenandoahParallelWorkerSession worker_session(worker_id);
142
143 ShenandoahHeap* heap = ShenandoahHeap::heap();
144 ShenandoahUpdateRefsClosure cl;
145 if (_check_alive) {
146 ShenandoahForwardedIsAliveClosure is_alive;
147 _root_updater->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>(worker_id, &is_alive, &cl);
148 } else {
149 AlwaysTrueClosure always_true;;
150 _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
151 }
152 }
153 };
154
155 class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
156 private:
157 ShenandoahConcurrentMark* _cm;
158 TaskTerminator* _terminator;
159
160 public:
161 ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator) :
162 AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
163 }
164
165 void work(uint worker_id) {
166 ShenandoahHeap* heap = ShenandoahHeap::heap();
167 ShenandoahConcurrentWorkerSession worker_session(worker_id);
168 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
169 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
170 ReferenceProcessor* rp;
171 if (heap->process_references()) {
172 rp = heap->ref_processor();
173 shenandoah_assert_rp_isalive_installed();
174 } else {
175 rp = NULL;
176 }
177
178 _cm->concurrent_scan_code_roots(worker_id, rp);
179 _cm->mark_loop(worker_id, _terminator, rp,
180 true, // cancellable
181 ShenandoahStringDedup::is_enabled()); // perform string dedup
185 class ShenandoahSATBThreadsClosure : public ThreadClosure {
186 private:
187 ShenandoahSATBBufferClosure* _satb_cl;
188 uintx _claim_token;
189
190 public:
191 ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
192 _satb_cl(satb_cl),
193 _claim_token(Threads::thread_claim_token()) {}
194
195 void do_thread(Thread* thread) {
196 if (thread->claim_threads_do(true, _claim_token)) {
197 ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
198 }
199 }
200 };
201
202 class ShenandoahFinalMarkingTask : public AbstractGangTask {
203 private:
204 ShenandoahConcurrentMark* _cm;
205 TaskTerminator* _terminator;
206 bool _dedup_string;
207
208 public:
209 ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
210 AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
211 }
212
213 void work(uint worker_id) {
214 ShenandoahHeap* heap = ShenandoahHeap::heap();
215
216 ShenandoahParallelWorkerSession worker_session(worker_id);
217 // First drain remaining SATB buffers.
218 // Notice that this is not strictly necessary for mark-compact. But since
219 // it requires a StrongRootsScope around the task, we need to claim the
220 // threads, and performance-wise it doesn't really matter. Adds about 1ms to
221 // full-gc.
222 {
223 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
224 ShenandoahSATBBufferClosure cl(q);
225 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
226 while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
227 ShenandoahSATBThreadsClosure tc(&cl);
228 Threads::threads_do(&tc);
229 }
388
389 ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);
390
391 if (_heap->process_references()) {
392 ReferenceProcessor* rp = _heap->ref_processor();
393 rp->set_active_mt_degree(nworkers);
394
395 // enable ("weak") refs discovery
396 rp->enable_discovery(true /*verify_no_refs*/);
397 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
398 }
399
400 shenandoah_assert_rp_isalive_not_installed();
401 ShenandoahIsAliveSelector is_alive;
402 ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
403
404 task_queues()->reserve(nworkers);
405
406 {
407 ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
408 TaskTerminator terminator(nworkers, task_queues());
409 ShenandoahConcurrentMarkingTask task(this, &terminator);
410 workers->run_task(&task);
411 }
412
413 assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
414 }
415
// Completes marking at the final-mark (or full-GC mark) safepoint: drains the
// remaining task-queue work (plus SATB buffers), then processes weak
// references if reference processing is enabled.
void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    // Temporarily install the is-alive closure on the reference processor for
    // the duration of the marking task; it must not be installed already.
    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    // StrongRootsScope must span the task so workers can claim threads while
    // draining their SATB buffers.
    StrongRootsScope scope(nworkers);
    TaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}
459
460 // Weak Reference Closures
461 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
462 uint _worker_id;
463 TaskTerminator* _terminator;
464 bool _reset_terminator;
465
466 public:
467 ShenandoahCMDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
468 _worker_id(worker_id),
469 _terminator(t),
470 _reset_terminator(reset_terminator) {
471 }
472
473 void do_void() {
474 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
475
476 ShenandoahHeap* sh = ShenandoahHeap::heap();
477 ShenandoahConcurrentMark* scm = sh->concurrent_mark();
478 assert(sh->process_references(), "why else would we be here?");
479 ReferenceProcessor* rp = sh->ref_processor();
480
481 shenandoah_assert_rp_isalive_installed();
482
483 scm->mark_loop(_worker_id, _terminator, rp,
484 false, // not cancellable
485 false); // do not do strdedup
486
487 if (_reset_terminator) {
535 class ShenandoahWeakUpdateClosure : public OopClosure {
536 private:
537 ShenandoahHeap* const _heap;
538
539 template <class T>
540 inline void do_oop_work(T* p) {
541 oop o = _heap->maybe_update_with_forwarded(p);
542 shenandoah_assert_marked_except(p, o, o == NULL);
543 }
544
545 public:
546 ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}
547
548 void do_oop(narrowOop* p) { do_oop_work(p); }
549 void do_oop(oop* p) { do_oop_work(p); }
550 };
551
552 class ShenandoahRefProcTaskProxy : public AbstractGangTask {
553 private:
554 AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
555 TaskTerminator* _terminator;
556
557 public:
558 ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
559 TaskTerminator* t) :
560 AbstractGangTask("Process reference objects in parallel"),
561 _proc_task(proc_task),
562 _terminator(t) {
563 }
564
565 void work(uint worker_id) {
566 ResourceMark rm;
567 HandleMark hm;
568 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
569 ShenandoahHeap* heap = ShenandoahHeap::heap();
570 ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
571 if (heap->has_forwarded_objects()) {
572 ShenandoahForwardedIsAliveClosure is_alive;
573 ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
574 _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
575 } else {
576 ShenandoahIsAliveClosure is_alive;
577 ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
578 _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
579 }
583 class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
584 private:
585 WorkGang* _workers;
586
587 public:
588 ShenandoahRefProcTaskExecutor(WorkGang* workers) :
589 _workers(workers) {
590 }
591
592 // Executes a task using worker threads.
593 void execute(ProcessTask& task, uint ergo_workers) {
594 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
595
596 ShenandoahHeap* heap = ShenandoahHeap::heap();
597 ShenandoahConcurrentMark* cm = heap->concurrent_mark();
598 ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
599 ergo_workers,
600 /* do_check = */ false);
601 uint nworkers = _workers->active_workers();
602 cm->task_queues()->reserve(nworkers);
603 TaskTerminator terminator(nworkers, cm->task_queues());
604 ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
605 _workers->run_task(&proc_task_proxy);
606 }
607 };
608
609 void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
610 assert(_heap->process_references(), "sanity");
611
612 ShenandoahPhaseTimings::Phase phase_root =
613 full_gc ?
614 ShenandoahPhaseTimings::full_gc_weakrefs :
615 ShenandoahPhaseTimings::weakrefs;
616
617 ShenandoahGCPhase phase(phase_root);
618
619 ReferenceProcessor* rp = _heap->ref_processor();
620
621 // NOTE: We cannot shortcut on has_discovered_references() here, because
622 // we will miss marking JNI Weak refs then, see implementation in
623 // ReferenceProcessor::process_discovered_references.
641 ShenandoahPhaseTimings::full_gc_weakrefs_termination :
642 ShenandoahPhaseTimings::weakrefs_termination;
643
644 shenandoah_assert_rp_isalive_not_installed();
645 ShenandoahIsAliveSelector is_alive;
646 ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
647
648 WorkGang* workers = _heap->workers();
649 uint nworkers = workers->active_workers();
650
651 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
652 rp->set_active_mt_degree(nworkers);
653
654 assert(task_queues()->is_empty(), "Should be empty");
655
656 // complete_gc and keep_alive closures instantiated here are only needed for
657 // single-threaded path in RP. They share the queue 0 for tracking work, which
658 // simplifies implementation. Since RP may decide to call complete_gc several
659 // times, we need to be able to reuse the terminator.
660 uint serial_worker_id = 0;
661 TaskTerminator terminator(1, task_queues());
662 ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
663
664 ShenandoahRefProcTaskExecutor executor(workers);
665
666 ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
667
668 {
669 ShenandoahGCPhase phase(phase_process);
670 ShenandoahTerminationTracker phase_term(phase_process_termination);
671
672 if (_heap->has_forwarded_objects()) {
673 ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
674 rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
675 &complete_gc, &executor,
676 &pt);
677
678 } else {
679 ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
680 rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
681 &complete_gc, &executor,
686 pt.print_all_references();
687
688 assert(task_queues()->is_empty(), "Should be empty");
689 }
690 }
691
692 class ShenandoahCancelledGCYieldClosure : public YieldClosure {
693 private:
694 ShenandoahHeap* const _heap;
695 public:
696 ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
697 virtual bool should_return() { return _heap->cancelled_gc(); }
698 };
699
700 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
701 public:
702 void do_void() {
703 ShenandoahHeap* sh = ShenandoahHeap::heap();
704 ShenandoahConcurrentMark* scm = sh->concurrent_mark();
705 assert(sh->process_references(), "why else would we be here?");
706 TaskTerminator terminator(1, scm->task_queues());
707
708 ReferenceProcessor* rp = sh->ref_processor();
709 shenandoah_assert_rp_isalive_installed();
710
711 scm->mark_loop(0, &terminator, rp,
712 false, // not cancellable
713 false); // do not do strdedup
714 }
715 };
716
717 class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
718 private:
719 ShenandoahObjToScanQueue* _queue;
720 ShenandoahHeap* _heap;
721 ShenandoahMarkingContext* const _mark_context;
722
723 template <class T>
724 inline void do_oop_work(T* p) {
725 ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
726 }
809 workers->run_task(&task);
810
811 assert(task_queues()->is_empty(), "Should be empty");
812 }
813
814 void ShenandoahConcurrentMark::cancel() {
815 // Clean up marking stacks.
816 ShenandoahObjToScanQueueSet* queues = task_queues();
817 queues->clear();
818
819 // Cancel SATB buffers.
820 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
821 }
822
823 ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
824 assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
825 return _task_queues->queue(worker_id);
826 }
827
828 template <bool CANCELLABLE>
829 void ShenandoahConcurrentMark::mark_loop_prework(uint w, TaskTerminator *t, ReferenceProcessor *rp,
830 bool strdedup) {
831 ShenandoahObjToScanQueue* q = get_queue(w);
832
833 jushort* ld = _heap->get_liveness_cache(w);
834
835 // TODO: We can clean up this if we figure out how to do templated oop closures that
836 // play nice with specialized_oop_iterators.
837 if (_heap->unload_classes()) {
838 if (_heap->has_forwarded_objects()) {
839 if (strdedup) {
840 ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
841 mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
842 } else {
843 ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
844 mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
845 }
846 } else {
847 if (strdedup) {
848 ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
849 mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
859 mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
860 } else {
861 ShenandoahMarkUpdateRefsClosure cl(q, rp);
862 mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
863 }
864 } else {
865 if (strdedup) {
866 ShenandoahMarkRefsDedupClosure cl(q, rp);
867 mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
868 } else {
869 ShenandoahMarkRefsClosure cl(q, rp);
870 mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
871 }
872 }
873 }
874
875 _heap->flush_liveness_cache(w);
876 }
877
878 template <class T, bool CANCELLABLE>
879 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, TaskTerminator *terminator) {
880 uintx stride = ShenandoahMarkLoopStride;
881
882 ShenandoahHeap* heap = ShenandoahHeap::heap();
883 ShenandoahObjToScanQueueSet* queues = task_queues();
884 ShenandoahObjToScanQueue* q;
885 ShenandoahMarkTask t;
886
887 /*
888 * Process outstanding queues, if any.
889 *
890 * There can be more queues than workers. To deal with the imbalance, we claim
891 * extra queues first. Since marking can push new tasks into the queue associated
892 * with this worker id, we come back to process this queue in the normal loop.
893 */
894 assert(queues->get_reserved() == heap->workers()->active_workers(),
895 "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());
896
897 q = queues->claim_next();
898 while (q != NULL) {
899 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
|