// Sets up a worker-gang task that runs the concurrent marking loop.
// NOTE(review): the AbstractGangTask name "Root Region Scan" looks stale for a
// concurrent-marking task — confirm whether it should say "Concurrent Marking".
ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
  AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
}
191
192
193 void work(uint worker_id) {
194 SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
195 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
196 jushort* live_data = _cm->get_liveness(worker_id);
197 ReferenceProcessor* rp;
198 if (_cm->process_references()) {
199 rp = ShenandoahHeap::heap()->ref_processor();
200 shenandoah_assert_rp_isalive_installed();
201 } else {
202 rp = NULL;
203 }
204
205 _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
206 _cm->mark_loop(worker_id, _terminator, rp,
207 true, // cancellable
208 true, // drain SATBs as we go
209 _cm->unload_classes(),
210 _update_refs,
211 ShenandoahStringDedup::is_enabled()); // perform string dedup
212 }
213 };
214
215 class ShenandoahFinalMarkingTask : public AbstractGangTask {
216 private:
217 ShenandoahConcurrentMark* _cm;
218 ParallelTaskTerminator* _terminator;
219 bool _update_refs;
220 bool _unload_classes;
221 bool _dedup_string;
222
223 public:
224 ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator,
225 bool update_refs, bool unload_classes, bool dedup_string) :
226 AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator),
227 _update_refs(update_refs), _unload_classes(unload_classes), _dedup_string(dedup_string) {
228 }
229
230 void work(uint worker_id) {
231 // First drain remaining SATB buffers.
232 // Notice that this is not strictly necessary for mark-compact. But since
233 // it requires a StrongRootsScope around the task, we need to claim the
234 // threads, and performance-wise it doesn't really matter. Adds about 1ms to
235 // full-gc.
236 _cm->drain_satb_buffers(worker_id, true);
237
238 ReferenceProcessor* rp;
239 if (_cm->process_references()) {
240 rp = ShenandoahHeap::heap()->ref_processor();
241 shenandoah_assert_rp_isalive_installed();
242 } else {
243 rp = NULL;
244 }
245
246 // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
247 // let's check here.
248 _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
249 _cm->mark_loop(worker_id, _terminator, rp,
250 false, // not cancellable
251 false, // do not drain SATBs, already drained
252 _unload_classes,
253 _update_refs,
254 _dedup_string);
255
256 assert(_cm->task_queues()->is_empty(), "Should be empty");
257 }
258 };
259
260 void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
261 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
262 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
263
264 ShenandoahHeap* heap = ShenandoahHeap::heap();
265
266 ShenandoahGCPhase phase(root_phase);
267
268 WorkGang* workers = heap->workers();
269 uint nworkers = workers->active_workers();
270
271 assert(nworkers <= task_queues()->size(), "Just check");
482
483 public:
// Remembers the buffer-draining closure and snapshots the current thread
// claim parity, so each thread is claimed (and drained) at most once.
ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
  _satb_cl(satb_cl),
  _thread_parity(Threads::thread_claim_parity()) {}
487
488 void do_thread(Thread* thread) {
489 if (thread->is_Java_thread()) {
490 if (thread->claim_oops_do(true, _thread_parity)) {
491 JavaThread* jt = (JavaThread*)thread;
492 ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
493 }
494 } else if (thread->is_VM_thread()) {
495 if (thread->claim_oops_do(true, _thread_parity)) {
496 ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
497 }
498 }
499 }
500 };
501
502 void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
503 ShenandoahObjToScanQueue* q = get_queue(worker_id);
504 ShenandoahSATBBufferClosure cl(q);
505
506 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
507 while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
508
509 if (remark) {
510 ShenandoahSATBThreadsClosure tc(&cl);
511 Threads::threads_do(&tc);
512 }
513 }
514
515 #if TASKQUEUE_STATS
516 void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
517 st->print_raw_cr("GC Task Stats");
518 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
519 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
520 }
521
522 void ShenandoahConcurrentMark::print_taskqueue_stats() const {
523 if (!log_develop_is_enabled(Trace, gc, task, stats)) {
524 return;
525 }
526 Log(gc, task, stats) log;
527 ResourceMark rm;
528 LogStream ls(log.trace());
529 outputStream* st = &ls;
530 print_taskqueue_stats_hdr(st);
531
532 TaskQueueStats totals;
558
559 public:
// worker_id: which worker's queue to drain; t: shared termination protocol;
// reset_terminator: when true, rearm the terminator for reuse after draining.
ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
  _worker_id(worker_id),
  _terminator(t),
  _reset_terminator(reset_terminator) {
}
565
566 void do_void() {
567 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
568
569 ShenandoahHeap* sh = ShenandoahHeap::heap();
570 ShenandoahConcurrentMark* scm = sh->concurrentMark();
571 assert(scm->process_references(), "why else would we be here?");
572 ReferenceProcessor* rp = sh->ref_processor();
573
574 shenandoah_assert_rp_isalive_installed();
575
576 scm->mark_loop(_worker_id, _terminator, rp,
577 false, // not cancellable
578 false, // do not drain SATBs
579 scm->unload_classes(),
580 sh->has_forwarded_objects(),
581 false); // do not do strdedup
582
583 if (_reset_terminator) {
584 _terminator->reset_for_reuse();
585 }
586 }
587 };
588
589
590 class ShenandoahCMKeepAliveClosure : public OopClosure {
591 private:
592 ShenandoahObjToScanQueue* _queue;
593 ShenandoahHeap* _heap;
594
595 template <class T>
596 inline void do_oop_nv(T* p) {
597 ShenandoahConcurrentMark::mark_through_ref<T, NONE, false /* string dedup */>(p, _heap, _queue);
598 }
784 private:
785 ShenandoahHeap* const _heap;
786 public:
787 ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
788 virtual bool should_return() { return _heap->cancelled_gc(); }
789 };
790
791 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
792 public:
793 void do_void() {
794 ShenandoahHeap* sh = ShenandoahHeap::heap();
795 ShenandoahConcurrentMark* scm = sh->concurrentMark();
796 assert(scm->process_references(), "why else would we be here?");
797 ParallelTaskTerminator terminator(1, scm->task_queues());
798
799 ReferenceProcessor* rp = sh->ref_processor();
800 shenandoah_assert_rp_isalive_installed();
801
802 scm->mark_loop(0, &terminator, rp,
803 false, // not cancellable
804 true, // drain SATBs
805 scm->unload_classes(),
806 sh->has_forwarded_objects(),
807 false); // do not do strdedup
808 }
809 };
810
811 class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
812 private:
813 ShenandoahObjToScanQueue* _queue;
814 ShenandoahHeap* _heap;
815
816 template <class T>
817 inline void do_oop_nv(T* p) {
818 ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, false /* string dedup */>(p, _heap, _queue);
819 }
820
821 public:
822 ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
823 _queue(q), _heap(ShenandoahHeap::heap()) {}
824
877 void ShenandoahConcurrentMark::cancel() {
878 // Clean up marking stacks.
879 ShenandoahObjToScanQueueSet* queues = task_queues();
880 queues->clear();
881
882 // Cancel SATB buffers.
883 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
884 }
885
886 ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
887 assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
888 return _task_queues->queue(worker_id);
889 }
890
// Resets a mark queue to its pristine state: the main queue, the overflow
// stack, and the internal buffer are all emptied.
void ShenandoahConcurrentMark::clear_queue(ShenandoahObjToScanQueue *q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}
896
// Per-worker front end of the marking loop. Zeroes the worker's liveness
// scratch array, selects the statically-specialized marking closure matching
// the (class_unload, update_refs, strdedup) combination, runs the loop, and
// finally flushes the accumulated per-region liveness into the regions.
template <bool CANCELLABLE, bool DRAIN_SATB>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool class_unload, bool update_refs, bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  // Zero the liveness array before the loop accumulates into it.
  jushort* ld = get_liveness(w);
  Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));

  // TODO: We can clean up this if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (class_unload) {
    if (update_refs) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
      }
    }
  } else {
    if (update_refs) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
      }
    }
  }


  // Publish the liveness data accumulated by this worker to the regions.
  for (uint i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    jushort live = ld[i];
    if (live > 0) {
      r->increase_live_data_gc_words(live);
    }
  }
}
958
959 template <class T, bool CANCELLABLE, bool DRAIN_SATB>
960 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
961 int seed = 17;
962 uintx stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;
963
964 ShenandoahHeap* heap = ShenandoahHeap::heap();
965 ShenandoahObjToScanQueueSet* queues = task_queues();
966 ShenandoahObjToScanQueue* q;
967 ShenandoahMarkTask t;
968
969 /*
970 * Process outstanding queues, if any.
971 *
972 * There can be more queues than workers. To deal with the imbalance, we claim
973 * extra queues first. Since marking can push new tasks into the queue associated
974 * with this worker id, we come back to process this queue in the normal loop.
975 */
976 assert(queues->get_reserved() == heap->workers()->active_workers(),
977 "Need to reserve proper number of queues");
978
979 q = queues->claim_next();
992 q = queues->claim_next();
993 break;
994 }
995 }
996 }
997 q = get_queue(worker_id);
998
999 ShenandoahSATBBufferClosure drain_satb(q);
1000 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
1001
1002 /*
1003 * Normal marking loop:
1004 */
1005 while (true) {
1006 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
1007 ShenandoahCancelledTerminatorTerminator tt;
1008 while (!terminator->offer_termination(&tt));
1009 return;
1010 }
1011
1012 if (DRAIN_SATB) {
1013 while (satb_mq_set.completed_buffers_num() > 0) {
1014 satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
1015 }
1016 }
1017
1018 uint work = 0;
1019 for (uint i = 0; i < stride; i++) {
1020 if (try_queue(q, t) ||
1021 queues->steal(worker_id, &seed, t)) {
1022 do_task<T>(q, cl, live_data, &t);
1023 work++;
1024 } else {
1025 break;
1026 }
1027 }
1028
1029 if (work == 0) {
1030 // No work encountered in current stride, try to terminate.
1031 // Need to leave the STS here otherwise it might block safepoints.
1032 SuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
1033 if (terminator->offer_termination()) return;
1034 }
1035 }
|
// Sets up a worker-gang task that runs the concurrent marking loop.
// NOTE(review): the AbstractGangTask name "Root Region Scan" looks stale for a
// concurrent-marking task — confirm whether it should say "Concurrent Marking".
ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
  AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
}
191
192
193 void work(uint worker_id) {
194 SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
195 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
196 jushort* live_data = _cm->get_liveness(worker_id);
197 ReferenceProcessor* rp;
198 if (_cm->process_references()) {
199 rp = ShenandoahHeap::heap()->ref_processor();
200 shenandoah_assert_rp_isalive_installed();
201 } else {
202 rp = NULL;
203 }
204
205 _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
206 _cm->mark_loop(worker_id, _terminator, rp,
207 true, // cancellable
208 _cm->unload_classes(),
209 _update_refs,
210 ShenandoahStringDedup::is_enabled()); // perform string dedup
211 }
212 };
213
214 class ShenandoahFinalMarkingTask : public AbstractGangTask {
215 private:
216 ShenandoahConcurrentMark* _cm;
217 ParallelTaskTerminator* _terminator;
218 bool _update_refs;
219 bool _unload_classes;
220 bool _dedup_string;
221
222 public:
223 ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator,
224 bool update_refs, bool unload_classes, bool dedup_string) :
225 AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator),
226 _update_refs(update_refs), _unload_classes(unload_classes), _dedup_string(dedup_string) {
227 }
228
229 void work(uint worker_id) {
230 // First drain remaining SATB buffers.
231 // Notice that this is not strictly necessary for mark-compact. But since
232 // it requires a StrongRootsScope around the task, we need to claim the
233 // threads, and performance-wise it doesn't really matter. Adds about 1ms to
234 // full-gc.
235 _cm->drain_satb_buffers(worker_id);
236
237 ReferenceProcessor* rp;
238 if (_cm->process_references()) {
239 rp = ShenandoahHeap::heap()->ref_processor();
240 shenandoah_assert_rp_isalive_installed();
241 } else {
242 rp = NULL;
243 }
244
245 // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
246 // let's check here.
247 _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
248 _cm->mark_loop(worker_id, _terminator, rp,
249 false, // not cancellable
250 _unload_classes,
251 _update_refs,
252 _dedup_string);
253
254 assert(_cm->task_queues()->is_empty(), "Should be empty");
255 }
256 };
257
258 void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
259 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
260 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
261
262 ShenandoahHeap* heap = ShenandoahHeap::heap();
263
264 ShenandoahGCPhase phase(root_phase);
265
266 WorkGang* workers = heap->workers();
267 uint nworkers = workers->active_workers();
268
269 assert(nworkers <= task_queues()->size(), "Just check");
480
481 public:
// Remembers the buffer-draining closure and snapshots the current thread
// claim parity, so each thread is claimed (and drained) at most once.
ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
  _satb_cl(satb_cl),
  _thread_parity(Threads::thread_claim_parity()) {}
485
486 void do_thread(Thread* thread) {
487 if (thread->is_Java_thread()) {
488 if (thread->claim_oops_do(true, _thread_parity)) {
489 JavaThread* jt = (JavaThread*)thread;
490 ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
491 }
492 } else if (thread->is_VM_thread()) {
493 if (thread->claim_oops_do(true, _thread_parity)) {
494 ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
495 }
496 }
497 }
498 };
499
500 void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id) {
501 ShenandoahObjToScanQueue* q = get_queue(worker_id);
502 ShenandoahSATBBufferClosure cl(q);
503 ShenandoahSATBThreadsClosure tc(&cl);
504 Threads::threads_do(&tc);
505 }
506
507 #if TASKQUEUE_STATS
508 void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
509 st->print_raw_cr("GC Task Stats");
510 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
511 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
512 }
513
514 void ShenandoahConcurrentMark::print_taskqueue_stats() const {
515 if (!log_develop_is_enabled(Trace, gc, task, stats)) {
516 return;
517 }
518 Log(gc, task, stats) log;
519 ResourceMark rm;
520 LogStream ls(log.trace());
521 outputStream* st = &ls;
522 print_taskqueue_stats_hdr(st);
523
524 TaskQueueStats totals;
550
551 public:
// worker_id: which worker's queue to drain; t: shared termination protocol;
// reset_terminator: when true, rearm the terminator for reuse after draining.
ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
  _worker_id(worker_id),
  _terminator(t),
  _reset_terminator(reset_terminator) {
}
557
558 void do_void() {
559 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
560
561 ShenandoahHeap* sh = ShenandoahHeap::heap();
562 ShenandoahConcurrentMark* scm = sh->concurrentMark();
563 assert(scm->process_references(), "why else would we be here?");
564 ReferenceProcessor* rp = sh->ref_processor();
565
566 shenandoah_assert_rp_isalive_installed();
567
568 scm->mark_loop(_worker_id, _terminator, rp,
569 false, // not cancellable
570 scm->unload_classes(),
571 sh->has_forwarded_objects(),
572 false); // do not do strdedup
573
574 if (_reset_terminator) {
575 _terminator->reset_for_reuse();
576 }
577 }
578 };
579
580
581 class ShenandoahCMKeepAliveClosure : public OopClosure {
582 private:
583 ShenandoahObjToScanQueue* _queue;
584 ShenandoahHeap* _heap;
585
586 template <class T>
587 inline void do_oop_nv(T* p) {
588 ShenandoahConcurrentMark::mark_through_ref<T, NONE, false /* string dedup */>(p, _heap, _queue);
589 }
775 private:
776 ShenandoahHeap* const _heap;
777 public:
778 ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
779 virtual bool should_return() { return _heap->cancelled_gc(); }
780 };
781
782 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
783 public:
784 void do_void() {
785 ShenandoahHeap* sh = ShenandoahHeap::heap();
786 ShenandoahConcurrentMark* scm = sh->concurrentMark();
787 assert(scm->process_references(), "why else would we be here?");
788 ParallelTaskTerminator terminator(1, scm->task_queues());
789
790 ReferenceProcessor* rp = sh->ref_processor();
791 shenandoah_assert_rp_isalive_installed();
792
793 scm->mark_loop(0, &terminator, rp,
794 false, // not cancellable
795 scm->unload_classes(),
796 sh->has_forwarded_objects(),
797 false); // do not do strdedup
798 }
799 };
800
801 class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
802 private:
803 ShenandoahObjToScanQueue* _queue;
804 ShenandoahHeap* _heap;
805
806 template <class T>
807 inline void do_oop_nv(T* p) {
808 ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, false /* string dedup */>(p, _heap, _queue);
809 }
810
811 public:
812 ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
813 _queue(q), _heap(ShenandoahHeap::heap()) {}
814
867 void ShenandoahConcurrentMark::cancel() {
868 // Clean up marking stacks.
869 ShenandoahObjToScanQueueSet* queues = task_queues();
870 queues->clear();
871
872 // Cancel SATB buffers.
873 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
874 }
875
876 ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
877 assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
878 return _task_queues->queue(worker_id);
879 }
880
// Resets a mark queue to its pristine state: the main queue, the overflow
// stack, and the internal buffer are all emptied.
void ShenandoahConcurrentMark::clear_queue(ShenandoahObjToScanQueue *q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}
886
// Per-worker front end of the marking loop. Zeroes the worker's liveness
// scratch array, selects the statically-specialized marking closure matching
// the (class_unload, update_refs, strdedup) combination, runs the loop, and
// finally flushes the accumulated per-region liveness into the regions.
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool class_unload, bool update_refs, bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  // Zero the liveness array before the loop accumulates into it.
  jushort* ld = get_liveness(w);
  Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));

  // TODO: We can clean up this if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (class_unload) {
    if (update_refs) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (update_refs) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }


  // Publish the liveness data accumulated by this worker to the regions.
  for (uint i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    jushort live = ld[i];
    if (live > 0) {
      r->increase_live_data_gc_words(live);
    }
  }
}
948
949 template <class T, bool CANCELLABLE>
950 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
951 int seed = 17;
952 uintx stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;
953
954 ShenandoahHeap* heap = ShenandoahHeap::heap();
955 ShenandoahObjToScanQueueSet* queues = task_queues();
956 ShenandoahObjToScanQueue* q;
957 ShenandoahMarkTask t;
958
959 /*
960 * Process outstanding queues, if any.
961 *
962 * There can be more queues than workers. To deal with the imbalance, we claim
963 * extra queues first. Since marking can push new tasks into the queue associated
964 * with this worker id, we come back to process this queue in the normal loop.
965 */
966 assert(queues->get_reserved() == heap->workers()->active_workers(),
967 "Need to reserve proper number of queues");
968
969 q = queues->claim_next();
982 q = queues->claim_next();
983 break;
984 }
985 }
986 }
987 q = get_queue(worker_id);
988
989 ShenandoahSATBBufferClosure drain_satb(q);
990 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
991
992 /*
993 * Normal marking loop:
994 */
995 while (true) {
996 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
997 ShenandoahCancelledTerminatorTerminator tt;
998 while (!terminator->offer_termination(&tt));
999 return;
1000 }
1001
1002 while (satb_mq_set.completed_buffers_num() > 0) {
1003 satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
1004 }
1005
1006 uint work = 0;
1007 for (uint i = 0; i < stride; i++) {
1008 if (try_queue(q, t) ||
1009 queues->steal(worker_id, &seed, t)) {
1010 do_task<T>(q, cl, live_data, &t);
1011 work++;
1012 } else {
1013 break;
1014 }
1015 }
1016
1017 if (work == 0) {
1018 // No work encountered in current stride, try to terminate.
1019 // Need to leave the STS here otherwise it might block safepoints.
1020 SuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
1021 if (terminator->offer_termination()) return;
1022 }
1023 }
|