// Constructor: captures the marking context, the terminator used for
// cooperative task termination, and whether references are updated while
// marking. NOTE(review): the gang-task name "Root Region Scan" looks stale
// for a task whose work() runs the concurrent marking loop -- confirm.
188 ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
189 AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
190 }
191
192
// Per-worker entry point for concurrent marking: scans code roots, then runs
// the cancellable marking loop, draining SATB buffers as it goes.
193 void work(uint worker_id) {
// Join the suspendible thread set so this worker can yield for safepoints.
194 SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
// NOTE(review): q and live_data are fetched but never used below --
// mark_loop() re-resolves both from worker_id. Candidates for removal.
195 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
196 jushort* live_data = _cm->get_liveness(worker_id);
197 ReferenceProcessor* rp;
198 if (_cm->process_references()) {
199 rp = ShenandoahHeap::heap()->ref_processor();
200 shenandoah_assert_rp_isalive_installed();
201 } else {
// No reference processing this cycle: mark without a ReferenceProcessor.
202 rp = NULL;
203 }
204
205 _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
206 _cm->mark_loop(worker_id, _terminator, rp,
207 true, // cancellable
208 true, // drain SATBs as we go
209 _cm->unload_classes(),
210 _update_refs,
211 ShenandoahStringDedup::is_enabled()); // perform string dedup
212 }
213 };
214
// Gang task for final (pause-time) marking: drains the remaining SATB
// buffers, re-scans code roots (a degenerated cycle may have bypassed the
// concurrent scan), then runs the marking loop to completion without
// cancellation. All worker queues must be empty on exit.
215 class ShenandoahFinalMarkingTask : public AbstractGangTask {
216 private:
217 ShenandoahConcurrentMark* _cm;
218 ParallelTaskTerminator* _terminator;
219 bool _update_refs;
220 bool _unload_classes;
221 bool _dedup_string;
222
223 public:
224 ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator,
225 bool update_refs, bool unload_classes, bool dedup_string) :
226 AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator),
227 _update_refs(update_refs), _unload_classes(unload_classes), _dedup_string(dedup_string) {
228 }
229
230 void work(uint worker_id) {
231 // First drain remaining SATB buffers.
232 // Notice that this is not strictly necessary for mark-compact. But since
233 // it requires a StrongRootsScope around the task, we need to claim the
234 // threads, and performance-wise it doesn't really matter. Adds about 1ms to
235 // full-gc.
// remark=true: also flush per-thread SATB queues, not just completed buffers.
236 _cm->drain_satb_buffers(worker_id, true);
237
238 ReferenceProcessor* rp;
239 if (_cm->process_references()) {
240 rp = ShenandoahHeap::heap()->ref_processor();
241 shenandoah_assert_rp_isalive_installed();
242 } else {
243 rp = NULL;
244 }
245
246 // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
247 // let's check here.
248 _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
249 _cm->mark_loop(worker_id, _terminator, rp,
250 false, // not cancellable
251 false, // do not drain SATBs, already drained
252 _unload_classes,
253 _update_refs,
254 _dedup_string);
255
// Final mark must leave nothing outstanding in the task queues.
256 assert(_cm->task_queues()->is_empty(), "Should be empty");
257 }
258 };
259
260 void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
261 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
262 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
263
264 ShenandoahHeap* heap = ShenandoahHeap::heap();
265
266 ShenandoahGCPhase phase(root_phase);
267
268 WorkGang* workers = heap->workers();
269 uint nworkers = workers->active_workers();
270
271 assert(nworkers <= task_queues()->size(), "Just check");
459 sh->workers()->run_task(&task);
460 }
461 }
462
463 assert(task_queues()->is_empty(), "Should be empty");
464
465 // When we're done marking everything, we process weak references.
466 if (process_references()) {
467 weak_refs_work(full_gc);
468 }
469
470 // And finally finish class unloading
471 if (unload_classes()) {
472 sh->unload_classes_and_cleanup_tables(full_gc);
473 }
474
475 assert(task_queues()->is_empty(), "Should be empty");
476
477 }
478
// Thread closure that claims each thread (oops_do parity protocol, so each
// thread is processed exactly once per phase) and flushes its SATB mark
// queue into the supplied buffer closure.
479 class ShenandoahSATBThreadsClosure : public ThreadClosure {
480 ShenandoahSATBBufferClosure* _satb_cl;
481 int _thread_parity;
482
483 public:
484 ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
485 _satb_cl(satb_cl),
486 _thread_parity(Threads::thread_claim_parity()) {}
487
488 void do_thread(Thread* thread) {
489 if (thread->is_Java_thread()) {
490 if (thread->claim_oops_do(true, _thread_parity)) {
491 JavaThread* jt = (JavaThread*)thread;
// Java threads carry a thread-local SATB queue; drain it here.
492 ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
493 }
494 } else if (thread->is_VM_thread()) {
495 if (thread->claim_oops_do(true, _thread_parity)) {
// The VM thread uses the shared SATB queue instead of a per-thread one.
496 ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
497 }
498 }
499 }
500 };
501
// Drains completed SATB buffers into this worker's marking queue. When
// remark is true (final-mark pause), additionally walks all threads to
// flush their partially-filled per-thread SATB queues.
502 void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
503 ShenandoahObjToScanQueue* q = get_queue(worker_id);
504 ShenandoahSATBBufferClosure cl(q);
505
506 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
// Keep applying until no completed buffer remains.
507 while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
508
509 if (remark) {
510 ShenandoahSATBThreadsClosure tc(&cl);
511 Threads::threads_do(&tc);
512 }
513 }
514
515 #if TASKQUEUE_STATS
// Prints the two-row header for the task-queue statistics table
// (TASKQUEUE_STATS builds only).
516 void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
517 st->print_raw_cr("GC Task Stats");
518 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
519 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
520 }
521
522 void ShenandoahConcurrentMark::print_taskqueue_stats() const {
523 if (!log_develop_is_enabled(Trace, gc, task, stats)) {
524 return;
525 }
526 Log(gc, task, stats) log;
527 ResourceMark rm;
528 LogStream ls(log.trace());
529 outputStream* st = &ls;
530 print_taskqueue_stats_hdr(st);
531
532 TaskQueueStats totals;
533 const uint n = _task_queues->size();
534 for (uint i = 0; i < n; ++i) {
558
559 public:
// Constructor: remembers the worker id and terminator; reset_terminator
// requests that the terminator be re-armed after the drain completes.
560 ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
561 _worker_id(worker_id),
562 _terminator(t),
563 _reset_terminator(reset_terminator) {
564 }
565
// Drains the marking stack on behalf of reference processing. Must run at
// a safepoint and only when reference processing is enabled.
566 void do_void() {
567 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
568
569 ShenandoahHeap* sh = ShenandoahHeap::heap();
570 ShenandoahConcurrentMark* scm = sh->concurrentMark();
571 assert(scm->process_references(), "why else would we be here?");
572 ReferenceProcessor* rp = sh->ref_processor();
573
574 shenandoah_assert_rp_isalive_installed();
575
576 scm->mark_loop(_worker_id, _terminator, rp,
577 false, // not cancellable
578 false, // do not drain SATBs
579 scm->unload_classes(),
580 sh->has_forwarded_objects(),
581 false); // do not do strdedup
582
// Re-arm the terminator so the same instance can serve another drain.
583 if (_reset_terminator) {
584 _terminator->reset_for_reuse();
585 }
586 }
587 };
588
589
590 class ShenandoahCMKeepAliveClosure : public OopClosure {
591 private:
592 ShenandoahObjToScanQueue* _queue;
593 ShenandoahHeap* _heap;
594
595 template <class T>
596 inline void do_oop_nv(T* p) {
597 ShenandoahConcurrentMark::mark_through_ref<T, NONE, false /* string dedup */>(p, _heap, _queue);
598 }
784 private:
785 ShenandoahHeap* const _heap;
786 public:
787 ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
788 virtual bool should_return() { return _heap->cancelled_gc(); }
789 };
790
// Closure used during reference precleaning: runs a single-threaded
// (worker 0, one-queue terminator) marking loop, draining SATB buffers,
// until marking work is exhausted.
791 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
792 public:
793 void do_void() {
794 ShenandoahHeap* sh = ShenandoahHeap::heap();
795 ShenandoahConcurrentMark* scm = sh->concurrentMark();
796 assert(scm->process_references(), "why else would we be here?");
// Single-participant terminator: precleaning here is not parallelized.
797 ParallelTaskTerminator terminator(1, scm->task_queues());
798
799 ReferenceProcessor* rp = sh->ref_processor();
800 shenandoah_assert_rp_isalive_installed();
801
802 scm->mark_loop(0, &terminator, rp,
803 false, // not cancellable
804 true, // drain SATBs
805 scm->unload_classes(),
806 sh->has_forwarded_objects(),
807 false); // do not do strdedup
808 }
809 };
810
811 class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
812 private:
813 ShenandoahObjToScanQueue* _queue;
814 ShenandoahHeap* _heap;
815
816 template <class T>
817 inline void do_oop_nv(T* p) {
818 ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, false /* string dedup */>(p, _heap, _queue);
819 }
820
821 public:
822 ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
823 _queue(q), _heap(ShenandoahHeap::heap()) {}
824
// Aborts an in-progress marking cycle: discards all queued marking tasks
// and abandons partially-filled SATB buffers.
877 void ShenandoahConcurrentMark::cancel() {
878 // Clean up marking stacks.
879 ShenandoahObjToScanQueueSet* queues = task_queues();
880 queues->clear();
881
882 // Cancel SATB buffers.
883 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
884 }
885
// Returns the marking queue reserved for the given worker. The worker id
// must be below the number of reserved queues.
886 ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
887 assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
888 return _task_queues->queue(worker_id);
889 }
890
// Empties one marking queue entirely: the queue proper, its overflow
// stack, and its internal buffer.
891 void ShenandoahConcurrentMark::clear_queue(ShenandoahObjToScanQueue *q) {
892 q->set_empty();
893 q->overflow_stack()->clear();
894 q->clear_buffer();
895 }
896
// Front end of the marking loop: resets this worker's per-region liveness
// counters, selects the oop-closure specialization matching the
// (class_unload, update_refs, strdedup) flags, runs mark_loop_work with it,
// and finally publishes the accumulated liveness into the heap regions.
897 template <bool CANCELLABLE, bool DRAIN_SATB>
898 void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp,
899 bool class_unload, bool update_refs, bool strdedup) {
900 ShenandoahObjToScanQueue* q = get_queue(w);
901
// Reset the per-region liveness scratch array for this worker.
902 jushort* ld = get_liveness(w);
903 Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));
904
905 // TODO: We can clean up this if we figure out how to do templated oop closures that
906 // play nice with specialized_oop_iterators.
907 if (class_unload) {
908 if (update_refs) {
909 if (strdedup) {
910 ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
911 ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
912 mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
913 } else {
914 ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
915 mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
916 }
917 } else {
918 if (strdedup) {
919 ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
920 ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
921 mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
922 } else {
923 ShenandoahMarkRefsMetadataClosure cl(q, rp);
924 mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
925 }
926 }
927 } else {
928 if (update_refs) {
929 if (strdedup) {
930 ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
931 ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
932 mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
933 } else {
934 ShenandoahMarkUpdateRefsClosure cl(q, rp);
935 mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
936 }
937 } else {
938 if (strdedup) {
939 ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
940 ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
941 mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
942 } else {
943 ShenandoahMarkRefsClosure cl(q, rp);
944 mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB>(&cl, ld, w, t);
945 }
946 }
947 }
948
949
// Flush this worker's liveness counters into the regions' GC live data.
950 for (uint i = 0; i < _heap->num_regions(); i++) {
951 ShenandoahHeapRegion* r = _heap->get_region(i);
952 jushort live = ld[i];
953 if (live > 0) {
954 r->increase_live_data_gc_words(live);
955 }
956 }
957 }
958
// Core work-stealing marking loop: first claims and processes any extra
// queues (there may be more queues than workers), then repeatedly pops
// from the worker's own queue or steals from others, periodically checking
// for cancellation and draining SATB buffers when requested.
959 template <class T, bool CANCELLABLE, bool DRAIN_SATB>
960 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
961 int seed = 17;
// Cancellable loops use a larger stride between cancellation checks.
962 uintx stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;
963
964 ShenandoahHeap* heap = ShenandoahHeap::heap();
965 ShenandoahObjToScanQueueSet* queues = task_queues();
966 ShenandoahObjToScanQueue* q;
967 ShenandoahMarkTask t;
968
969 /*
970 * Process outstanding queues, if any.
971 *
972 * There can be more queues than workers. To deal with the imbalance, we claim
973 * extra queues first. Since marking can push new tasks into the queue associated
974 * with this worker id, we come back to process this queue in the normal loop.
975 */
976 assert(queues->get_reserved() == heap->workers()->active_workers(),
977 "Need to reserve proper number of queues");
978
979 q = queues->claim_next();
// (listing gap: original lines 980-991 are not shown in this excerpt)
992 q = queues->claim_next();
993 break;
994 }
995 }
996 }
// Fall back to this worker's own queue for the normal marking loop.
997 q = get_queue(worker_id);
998
999 ShenandoahSATBBufferClosure drain_satb(q);
1000 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
1001
1002 /*
1003 * Normal marking loop:
1004 */
1005 while (true) {
// On cancellation, synchronize termination with the other workers and bail.
1006 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
1007 ShenandoahCancelledTerminatorTerminator tt;
1008 while (!terminator->offer_termination(&tt));
1009 return;
1010 }
1011
1012 if (DRAIN_SATB) {
1013 while (satb_mq_set.completed_buffers_num() > 0) {
1014 satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
1015 }
1016 }
1017
// Pop from the local queue, or steal from a sibling, up to stride tasks.
1018 uint work = 0;
1019 for (uint i = 0; i < stride; i++) {
1020 if (try_queue(q, t) ||
1021 queues->steal(worker_id, &seed, t)) {
1022 do_task<T>(q, cl, live_data, &t);
1023 work++;
1024 } else {
1025 break;
1026 }
1027 }
1028
1029 if (work == 0) {
1030 // No work encountered in current stride, try to terminate.
1031 // Need to leave the STS here otherwise it might block safepoints.
1032 SuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
1033 if (terminator->offer_termination()) return;
1034 }
1035 }
|
// Constructor: captures the marking context, the terminator, and whether
// references are updated while marking. NOTE(review): the gang-task name
// "Root Region Scan" looks stale for a task whose work() runs the
// concurrent marking loop -- confirm.
188 ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
189 AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
190 }
191
192
// Per-worker entry point for concurrent marking. In this revision
// mark_loop() no longer takes a drain-SATB flag: SATB buffers are drained
// unconditionally inside mark_loop_work.
193 void work(uint worker_id) {
// Join the suspendible thread set so this worker can yield for safepoints.
194 SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
// NOTE(review): q and live_data are fetched but never used below --
// mark_loop() re-resolves both from worker_id. Candidates for removal.
195 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
196 jushort* live_data = _cm->get_liveness(worker_id);
197 ReferenceProcessor* rp;
198 if (_cm->process_references()) {
199 rp = ShenandoahHeap::heap()->ref_processor();
200 shenandoah_assert_rp_isalive_installed();
201 } else {
// No reference processing this cycle: mark without a ReferenceProcessor.
202 rp = NULL;
203 }
204
205 _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
206 _cm->mark_loop(worker_id, _terminator, rp,
207 true, // cancellable
208 _cm->unload_classes(),
209 _update_refs,
210 ShenandoahStringDedup::is_enabled()); // perform string dedup
211 }
212 };
213
// Thread closure that claims each thread (oops_do parity protocol, so each
// thread is processed exactly once per phase) and flushes its SATB mark
// queue into the supplied buffer closure.
214 class ShenandoahSATBThreadsClosure : public ThreadClosure {
215 ShenandoahSATBBufferClosure* _satb_cl;
216 int _thread_parity;
217
218 public:
219 ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
220 _satb_cl(satb_cl),
221 _thread_parity(Threads::thread_claim_parity()) {}
222
223 void do_thread(Thread* thread) {
224 if (thread->is_Java_thread()) {
225 if (thread->claim_oops_do(true, _thread_parity)) {
226 JavaThread* jt = (JavaThread*)thread;
// Java threads carry a thread-local SATB queue; drain it here.
227 ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
228 }
229 } else if (thread->is_VM_thread()) {
230 if (thread->claim_oops_do(true, _thread_parity)) {
// The VM thread uses the shared SATB queue instead of a per-thread one.
231 ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
232 }
233 }
234 }
235 };
236
// Gang task for final (pause-time) marking. In this revision the SATB
// drain is inlined here (completed buffers first, then every thread's
// local queue via ShenandoahSATBThreadsClosure) instead of delegating to
// drain_satb_buffers. Afterwards it re-scans code roots (a degenerated
// cycle may have bypassed the concurrent scan) and runs the marking loop
// to completion without cancellation.
237 class ShenandoahFinalMarkingTask : public AbstractGangTask {
238 private:
239 ShenandoahConcurrentMark* _cm;
240 ParallelTaskTerminator* _terminator;
241 bool _update_refs;
242 bool _unload_classes;
243 bool _dedup_string;
244
245 public:
246 ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator,
247 bool update_refs, bool unload_classes, bool dedup_string) :
248 AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator),
249 _update_refs(update_refs), _unload_classes(unload_classes), _dedup_string(dedup_string) {
250 }
251
252 void work(uint worker_id) {
253 // First drain remaining SATB buffers.
254 // Notice that this is not strictly necessary for mark-compact. But since
255 // it requires a StrongRootsScope around the task, we need to claim the
256 // threads, and performance-wise it doesn't really matter. Adds about 1ms to
257 // full-gc.
258 ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
259 ShenandoahSATBBufferClosure cl(q);
260 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
// Drain all completed buffers, then flush each thread's local queue.
261 while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
262 ShenandoahSATBThreadsClosure tc(&cl);
263 Threads::threads_do(&tc);
264
265 ReferenceProcessor* rp;
266 if (_cm->process_references()) {
267 rp = ShenandoahHeap::heap()->ref_processor();
268 shenandoah_assert_rp_isalive_installed();
269 } else {
270 rp = NULL;
271 }
272
273 // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
274 // let's check here.
275 _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
276 _cm->mark_loop(worker_id, _terminator, rp,
277 false, // not cancellable
278 _unload_classes,
279 _update_refs,
280 _dedup_string);
281
// Final mark must leave nothing outstanding in the task queues.
282 assert(_cm->task_queues()->is_empty(), "Should be empty");
283 }
284 };
285
286 void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
287 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
288 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
289
290 ShenandoahHeap* heap = ShenandoahHeap::heap();
291
292 ShenandoahGCPhase phase(root_phase);
293
294 WorkGang* workers = heap->workers();
295 uint nworkers = workers->active_workers();
296
297 assert(nworkers <= task_queues()->size(), "Just check");
485 sh->workers()->run_task(&task);
486 }
487 }
488
489 assert(task_queues()->is_empty(), "Should be empty");
490
491 // When we're done marking everything, we process weak references.
492 if (process_references()) {
493 weak_refs_work(full_gc);
494 }
495
496 // And finally finish class unloading
497 if (unload_classes()) {
498 sh->unload_classes_and_cleanup_tables(full_gc);
499 }
500
501 assert(task_queues()->is_empty(), "Should be empty");
502
503 }
504
505 #if TASKQUEUE_STATS
// Prints the two-row header for the task-queue statistics table
// (TASKQUEUE_STATS builds only).
506 void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
507 st->print_raw_cr("GC Task Stats");
508 st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
509 st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
510 }
511
512 void ShenandoahConcurrentMark::print_taskqueue_stats() const {
513 if (!log_develop_is_enabled(Trace, gc, task, stats)) {
514 return;
515 }
516 Log(gc, task, stats) log;
517 ResourceMark rm;
518 LogStream ls(log.trace());
519 outputStream* st = &ls;
520 print_taskqueue_stats_hdr(st);
521
522 TaskQueueStats totals;
523 const uint n = _task_queues->size();
524 for (uint i = 0; i < n; ++i) {
548
549 public:
// Constructor: remembers the worker id and terminator; reset_terminator
// requests that the terminator be re-armed after the drain completes.
550 ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
551 _worker_id(worker_id),
552 _terminator(t),
553 _reset_terminator(reset_terminator) {
554 }
555
// Drains the marking stack on behalf of reference processing. Must run at
// a safepoint and only when reference processing is enabled. This
// revision's mark_loop takes no drain-SATB flag.
556 void do_void() {
557 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
558
559 ShenandoahHeap* sh = ShenandoahHeap::heap();
560 ShenandoahConcurrentMark* scm = sh->concurrentMark();
561 assert(scm->process_references(), "why else would we be here?");
562 ReferenceProcessor* rp = sh->ref_processor();
563
564 shenandoah_assert_rp_isalive_installed();
565
566 scm->mark_loop(_worker_id, _terminator, rp,
567 false, // not cancellable
568 scm->unload_classes(),
569 sh->has_forwarded_objects(),
570 false); // do not do strdedup
571
// Re-arm the terminator so the same instance can serve another drain.
572 if (_reset_terminator) {
573 _terminator->reset_for_reuse();
574 }
575 }
576 };
577
578
579 class ShenandoahCMKeepAliveClosure : public OopClosure {
580 private:
581 ShenandoahObjToScanQueue* _queue;
582 ShenandoahHeap* _heap;
583
584 template <class T>
585 inline void do_oop_nv(T* p) {
586 ShenandoahConcurrentMark::mark_through_ref<T, NONE, false /* string dedup */>(p, _heap, _queue);
587 }
773 private:
774 ShenandoahHeap* const _heap;
775 public:
776 ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
777 virtual bool should_return() { return _heap->cancelled_gc(); }
778 };
779
// Closure used during reference precleaning: runs a single-threaded
// (worker 0, one-queue terminator) marking loop until marking work is
// exhausted. SATB draining happens inside mark_loop in this revision.
780 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
781 public:
782 void do_void() {
783 ShenandoahHeap* sh = ShenandoahHeap::heap();
784 ShenandoahConcurrentMark* scm = sh->concurrentMark();
785 assert(scm->process_references(), "why else would we be here?");
// Single-participant terminator: precleaning here is not parallelized.
786 ParallelTaskTerminator terminator(1, scm->task_queues());
787
788 ReferenceProcessor* rp = sh->ref_processor();
789 shenandoah_assert_rp_isalive_installed();
790
791 scm->mark_loop(0, &terminator, rp,
792 false, // not cancellable
793 scm->unload_classes(),
794 sh->has_forwarded_objects(),
795 false); // do not do strdedup
796 }
797 };
798
799 class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
800 private:
801 ShenandoahObjToScanQueue* _queue;
802 ShenandoahHeap* _heap;
803
804 template <class T>
805 inline void do_oop_nv(T* p) {
806 ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, false /* string dedup */>(p, _heap, _queue);
807 }
808
809 public:
810 ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
811 _queue(q), _heap(ShenandoahHeap::heap()) {}
812
// Aborts an in-progress marking cycle: discards all queued marking tasks
// and abandons partially-filled SATB buffers.
865 void ShenandoahConcurrentMark::cancel() {
866 // Clean up marking stacks.
867 ShenandoahObjToScanQueueSet* queues = task_queues();
868 queues->clear();
869
870 // Cancel SATB buffers.
871 ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
872 }
873
// Returns the marking queue reserved for the given worker. The worker id
// must be below the number of reserved queues.
874 ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
875 assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
876 return _task_queues->queue(worker_id);
877 }
878
// Empties one marking queue entirely: the queue proper, its overflow
// stack, and its internal buffer.
879 void ShenandoahConcurrentMark::clear_queue(ShenandoahObjToScanQueue *q) {
880 q->set_empty();
881 q->overflow_stack()->clear();
882 q->clear_buffer();
883 }
884
// Front end of the marking loop (this revision drops the DRAIN_SATB
// template parameter): resets this worker's per-region liveness counters,
// selects the oop-closure specialization matching the (class_unload,
// update_refs, strdedup) flags, runs mark_loop_work with it, and finally
// publishes the accumulated liveness into the heap regions.
885 template <bool CANCELLABLE>
886 void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp,
887 bool class_unload, bool update_refs, bool strdedup) {
888 ShenandoahObjToScanQueue* q = get_queue(w);
889
// Reset the per-region liveness scratch array for this worker.
890 jushort* ld = get_liveness(w);
891 Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));
892
893 // TODO: We can clean up this if we figure out how to do templated oop closures that
894 // play nice with specialized_oop_iterators.
895 if (class_unload) {
896 if (update_refs) {
897 if (strdedup) {
898 ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
899 ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
900 mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
901 } else {
902 ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
903 mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
904 }
905 } else {
906 if (strdedup) {
907 ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
908 ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
909 mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
910 } else {
911 ShenandoahMarkRefsMetadataClosure cl(q, rp);
912 mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
913 }
914 }
915 } else {
916 if (update_refs) {
917 if (strdedup) {
918 ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
919 ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
920 mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
921 } else {
922 ShenandoahMarkUpdateRefsClosure cl(q, rp);
923 mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
924 }
925 } else {
926 if (strdedup) {
927 ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
928 ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
929 mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
930 } else {
931 ShenandoahMarkRefsClosure cl(q, rp);
932 mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
933 }
934 }
935 }
936
937
// Flush this worker's liveness counters into the regions' GC live data.
938 for (uint i = 0; i < _heap->num_regions(); i++) {
939 ShenandoahHeapRegion* r = _heap->get_region(i);
940 jushort live = ld[i];
941 if (live > 0) {
942 r->increase_live_data_gc_words(live);
943 }
944 }
945 }
946
// Core work-stealing marking loop (this revision drops DRAIN_SATB: SATB
// buffers are drained unconditionally on every iteration). First claims
// and processes any extra queues (there may be more queues than workers),
// then repeatedly pops from the worker's own queue or steals from others,
// periodically checking for cancellation.
947 template <class T, bool CANCELLABLE>
948 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
949 int seed = 17;
// Cancellable loops use a larger stride between cancellation checks.
950 uintx stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;
951
952 ShenandoahHeap* heap = ShenandoahHeap::heap();
953 ShenandoahObjToScanQueueSet* queues = task_queues();
954 ShenandoahObjToScanQueue* q;
955 ShenandoahMarkTask t;
956
957 /*
958 * Process outstanding queues, if any.
959 *
960 * There can be more queues than workers. To deal with the imbalance, we claim
961 * extra queues first. Since marking can push new tasks into the queue associated
962 * with this worker id, we come back to process this queue in the normal loop.
963 */
964 assert(queues->get_reserved() == heap->workers()->active_workers(),
965 "Need to reserve proper number of queues");
966
967 q = queues->claim_next();
// (listing gap: original lines 968-979 are not shown in this excerpt)
980 q = queues->claim_next();
981 break;
982 }
983 }
984 }
// Fall back to this worker's own queue for the normal marking loop.
985 q = get_queue(worker_id);
986
987 ShenandoahSATBBufferClosure drain_satb(q);
988 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
989
990 /*
991 * Normal marking loop:
992 */
993 while (true) {
// On cancellation, synchronize termination with the other workers and bail.
994 if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
995 ShenandoahCancelledTerminatorTerminator tt;
996 while (!terminator->offer_termination(&tt));
997 return;
998 }
999
// Always drain completed SATB buffers before taking more marking work.
1000 while (satb_mq_set.completed_buffers_num() > 0) {
1001 satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
1002 }
1003
// Pop from the local queue, or steal from a sibling, up to stride tasks.
1004 uint work = 0;
1005 for (uint i = 0; i < stride; i++) {
1006 if (try_queue(q, t) ||
1007 queues->steal(worker_id, &seed, t)) {
1008 do_task<T>(q, cl, live_data, &t);
1009 work++;
1010 } else {
1011 break;
1012 }
1013 }
1014
1015 if (work == 0) {
1016 // No work encountered in current stride, try to terminate.
1017 // Need to leave the STS here otherwise it might block safepoints.
1018 SuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
1019 if (terminator->offer_termination()) return;
1020 }
1021 }
|