33 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
34 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
35 #include "gc/shenandoah/shenandoah_specialized_oop_closures.hpp"
36 #include "gc/shenandoah/brooksPointer.hpp"
37 #include "gc/shared/referenceProcessor.hpp"
38 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
39 #include "code/codeCache.hpp"
40 #include "classfile/symbolTable.hpp"
41 #include "classfile/systemDictionary.hpp"
42 #include "memory/iterator.inline.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "gc/shared/taskqueue.inline.hpp"
45
46 class ShenandoahInitMarkRootsClosure : public OopClosure {
47 private:
48 SCMObjToScanQueue* _queue;
49 ShenandoahHeap* _heap;
50
51 template <class T>
52 inline void do_oop_nv(T* p) {
53 ShenandoahConcurrentMark::mark_through_ref<T, RESOLVE>(p, _heap, _queue);
54 }
55
56 public:
57 ShenandoahInitMarkRootsClosure(SCMObjToScanQueue* q) :
58 _queue(q), _heap(ShenandoahHeap::heap()) {};
59
60 void do_oop(narrowOop* p) { do_oop_nv(p); }
61 void do_oop(oop* p) { do_oop_nv(p); }
62 };
63
64 ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(SCMObjToScanQueue* q, ReferenceProcessor* rp) :
65 MetadataAwareOopClosure(rp),
66 _queue(q),
67 _heap((ShenandoahHeap*) Universe::heap())
68 {
69 }
70
71 class ShenandoahInitMarkRootsTask : public AbstractGangTask {
72 private:
73 ShenandoahRootProcessor* _rp;
74 bool _process_refs;
75 public:
76 ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
77 AbstractGangTask("Shenandoah init mark roots task"),
78 _rp(rp),
79 _process_refs(process_refs) {
80 }
81
82 void work(uint worker_id) {
83 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
84
85 ShenandoahHeap* heap = ShenandoahHeap::heap();
86 SCMObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
87 assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);
172 ReferenceProcessor* rp;
173 if (_cm->process_references()) {
174 rp = ShenandoahHeap::heap()->ref_processor();
175 } else {
176 rp = NULL;
177 }
178 if (ShenandoahConcurrentCodeRoots && _cm->claim_codecache()) {
179 if (! _cm->unload_classes()) {
180 ShenandoahMarkResolveRefsClosure cl(q, rp);
181 CodeBlobToOopClosure blobs(&cl, ! CodeBlobToOopClosure::FixRelocations);
182 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
183 CodeCache::blobs_do(&blobs);
184 }
185 }
186
187 _cm->mark_loop(worker_id, _terminator, rp,
188 true, // cancellable
189 true, // drain SATBs as we go
190 true, // count liveness
191 _cm->unload_classes(),
192 _update_refs);
193 }
194 };
195
// Final-mark gang task: each worker first drains its remaining SATB
// buffers, then runs the (non-cancellable) marking loop to completion
// under the shared terminator. Runs at a safepoint.
class SCMFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;     // also update forwarded references while marking
  bool _count_live;      // accumulate per-region liveness data
  bool _unload_classes;  // use metadata-aware closures (class unloading)

public:
  SCMFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live, bool unload_classes) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live), _unload_classes(unload_classes) {
  }

  void work(uint worker_id) {
    // First drain remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
    // full-gc.
    _cm->drain_satb_buffers(worker_id, true);

    // Reference processor is only consulted when reference processing is enabled.
    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs, already drained
                   _count_live,
                   _unload_classes,
                   _update_refs);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};
234
235 void ShenandoahConcurrentMark::mark_roots() {
236 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
237 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
238
239 ShenandoahHeap* heap = ShenandoahHeap::heap();
240
241 ClassLoaderDataGraph::clear_claimed_marks();
242 WorkGang* workers = heap->workers();
243 uint nworkers = workers->active_workers();
244
245 assert(nworkers <= task_queues()->size(), "Just check");
246
247 ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::scan_thread_roots);
248 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
249 task_queues()->reserve(nworkers);
597 }
598
599
  // Completes marking during reference processing: runs the marking loop
  // on this worker's queue. Not cancellable and does not drain SATB
  // buffers (presumably already drained by this point -- TODO confirm
  // against the enclosing drain-stack closure).
  void do_void() {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    // Reference processor only participates when reference processing is on.
    ReferenceProcessor* rp;
    if (scm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs
                   true,  // count liveness
                   scm->unload_classes(),
                   sh->need_update_refs());
  }
};
619 };
620
621
622 class ShenandoahCMKeepAliveClosure : public OopClosure {
623 private:
624 SCMObjToScanQueue* _queue;
625 ShenandoahHeap* _heap;
626
627 template <class T>
628 inline void do_oop_nv(T* p) {
629 ShenandoahConcurrentMark::mark_through_ref<T, NONE>(p, _heap, _queue);
630 }
631
632 public:
633 ShenandoahCMKeepAliveClosure(SCMObjToScanQueue* q) :
634 _queue(q), _heap(ShenandoahHeap::heap()) {};
635
636 void do_oop(narrowOop* p) { do_oop_nv(p); }
637 void do_oop(oop* p) { do_oop_nv(p); }
638 };
639
640 class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
641 private:
642 SCMObjToScanQueue* _queue;
643 ShenandoahHeap* _heap;
644
645 template <class T>
646 inline void do_oop_nv(T* p) {
647 ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE>(p, _heap, _queue);
648 }
649
650 public:
651 ShenandoahCMKeepAliveUpdateClosure(SCMObjToScanQueue* q) :
652 _queue(q), _heap(ShenandoahHeap::heap()) {};
653
654 void do_oop(narrowOop* p) { do_oop_nv(p); }
655 void do_oop(oop* p) { do_oop_nv(p); }
656 };
657
658 class ShenandoahRefProcTaskProxy : public AbstractGangTask {
659
660 private:
661 AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
662 ParallelTaskTerminator* _terminator;
663 public:
664
665 ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
666 ParallelTaskTerminator* t) :
667 AbstractGangTask("Process reference objects in parallel"),
668 _proc_task(proc_task),
669 _terminator(t) {
670 }
671
672 void work(uint worker_id) {
673 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
674 ShenandoahHeap* heap = ShenandoahHeap::heap();
675 ShenandoahForwardedIsAliveClosure is_alive;
676 ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
677 if (heap->need_update_refs()) {
678 ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
679 _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
680 } else {
681 ShenandoahCMKeepAliveClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
682 _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
683 }
684 }
685 };
686
687 class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
688
689 private:
690 AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;
691
692 public:
693
694 ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
695 AbstractGangTask("Enqueue reference objects in parallel"),
696 _enqueue_task(enqueue_task) {
697 }
698
699 void work(uint worker_id) {
700 _enqueue_task.work(worker_id);
701 }
702 };
703
704 class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
799
800 // Clean up marking stacks.
801 SCMObjToScanQueueSet* queues = task_queues();
802 queues->clear();
803
804 // Cancel SATB buffers.
805 JavaThread::satb_mark_queue_set().abandon_partial_marking();
806 }
807
// Returns the scan queue reserved for the given worker. Callers must
// have reserved at least worker_id+1 queues beforehand.
SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
  return _task_queues->queue(worker_id);
}
812
// Discards all pending entries of a scan queue: the queue proper, its
// overflow stack, and its internal buffer.
void ShenandoahConcurrentMark::clear_queue(SCMObjToScanQueue *q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}
818
// Per-worker entry into the marking loop: the template flags select the
// concrete marking closure type, then mark_loop_work runs with it.
// Liveness is accumulated into a per-worker array (one jushort counter
// per region) and flushed to the regions afterwards, avoiding contended
// per-region updates during marking.
template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS, bool CLASS_UNLOAD, bool UPDATE_REFS>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp) {
  SCMObjToScanQueue* q = get_queue(w);

  // Reset this worker's liveness counters, if liveness is tracked.
  jushort* ld;
  if (COUNT_LIVENESS) {
    ld = get_liveness(w);
    Copy::fill_to_bytes(ld, _heap->max_regions() * sizeof(jushort));
  } else {
    ld = NULL;
  }

  // TODO: We can clean up this if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (CLASS_UNLOAD) {
    if (UPDATE_REFS) {
      ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    } else {
      ShenandoahMarkRefsMetadataClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    }
  } else {
    if (UPDATE_REFS) {
      ShenandoahMarkUpdateRefsClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    } else {
      ShenandoahMarkRefsClosure cl(q, rp);
      mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
    }
  }

  // Flush this worker's liveness counts into the regions' live data.
  if (COUNT_LIVENESS) {
    for (uint i = 0; i < _heap->max_regions(); i++) {
      ShenandoahHeapRegion *r = _heap->regions()->get(i);
      if (r != NULL) {
        jushort live = ld[i];
        if (live > 0) {
          r->increase_live_data_words(live);
        }
      }
    }
  }
}
863
864 template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
865 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
866 int seed = 17;
867 uint stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;
868
869 ShenandoahHeap* heap = ShenandoahHeap::heap();
870 SCMObjToScanQueueSet* queues = task_queues();
|
33 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
34 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
35 #include "gc/shenandoah/shenandoah_specialized_oop_closures.hpp"
36 #include "gc/shenandoah/brooksPointer.hpp"
37 #include "gc/shared/referenceProcessor.hpp"
38 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
39 #include "code/codeCache.hpp"
40 #include "classfile/symbolTable.hpp"
41 #include "classfile/systemDictionary.hpp"
42 #include "memory/iterator.inline.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "gc/shared/taskqueue.inline.hpp"
45
// Init-mark root scanning closure: every root reference visited is fed
// through mark_through_ref (RESOLVE mode, connection-matrix updates
// disabled) onto this worker's scan queue.
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue; // per-worker scan queue
  ShenandoahHeap* _heap;     // cached heap instance

  // Shared implementation for both narrow and wide oops.
  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, RESOLVE, false>(p, _heap, _queue, NULL);
  }

public:
  ShenandoahInitMarkRootsClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {};

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p) { do_oop_nv(p); }
};
63
// Base constructor for the marking-refs closure family: caches the heap,
// the worker's scan queue, and the connection matrix; rp is handed to
// MetadataAwareOopClosure for reference discovery.
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(SCMObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _conn_matrix(ShenandoahHeap::heap()->connection_matrix())
{
}
71
72 class ShenandoahInitMarkRootsTask : public AbstractGangTask {
73 private:
74 ShenandoahRootProcessor* _rp;
75 bool _process_refs;
76 public:
77 ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
78 AbstractGangTask("Shenandoah init mark roots task"),
79 _rp(rp),
80 _process_refs(process_refs) {
81 }
82
83 void work(uint worker_id) {
84 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
85
86 ShenandoahHeap* heap = ShenandoahHeap::heap();
87 SCMObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
88 assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);
173 ReferenceProcessor* rp;
174 if (_cm->process_references()) {
175 rp = ShenandoahHeap::heap()->ref_processor();
176 } else {
177 rp = NULL;
178 }
179 if (ShenandoahConcurrentCodeRoots && _cm->claim_codecache()) {
180 if (! _cm->unload_classes()) {
181 ShenandoahMarkResolveRefsClosure cl(q, rp);
182 CodeBlobToOopClosure blobs(&cl, ! CodeBlobToOopClosure::FixRelocations);
183 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
184 CodeCache::blobs_do(&blobs);
185 }
186 }
187
188 _cm->mark_loop(worker_id, _terminator, rp,
189 true, // cancellable
190 true, // drain SATBs as we go
191 true, // count liveness
192 _cm->unload_classes(),
193 _update_refs,
194 UseShenandoahMatrix);
195 }
196 };
197
// Final-mark gang task: each worker first drains its remaining SATB
// buffers, then runs the (non-cancellable) marking loop to completion
// under the shared terminator. Runs at a safepoint.
class SCMFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;     // also update forwarded references while marking
  bool _count_live;      // accumulate per-region liveness data
  bool _unload_classes;  // use metadata-aware closures (class unloading)

public:
  SCMFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live, bool unload_classes) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live), _unload_classes(unload_classes) {
  }

  void work(uint worker_id) {
    // First drain remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
    // full-gc.
    _cm->drain_satb_buffers(worker_id, true);

    // Reference processor is only consulted when reference processing is enabled.
    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs, already drained
                   _count_live,
                   _unload_classes,
                   _update_refs,
                   UseShenandoahMatrix);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};
237
238 void ShenandoahConcurrentMark::mark_roots() {
239 assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
240 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
241
242 ShenandoahHeap* heap = ShenandoahHeap::heap();
243
244 ClassLoaderDataGraph::clear_claimed_marks();
245 WorkGang* workers = heap->workers();
246 uint nworkers = workers->active_workers();
247
248 assert(nworkers <= task_queues()->size(), "Just check");
249
250 ShenandoahRootProcessor root_proc(heap, nworkers, ShenandoahCollectorPolicy::scan_thread_roots);
251 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
252 task_queues()->reserve(nworkers);
600 }
601
602
  // Completes marking during reference processing: runs the marking loop
  // on this worker's queue. Not cancellable and does not drain SATB
  // buffers (presumably already drained by this point -- TODO confirm
  // against the enclosing drain-stack closure).
  void do_void() {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    // Reference processor only participates when reference processing is on.
    ReferenceProcessor* rp;
    if (scm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs
                   true,  // count liveness
                   scm->unload_classes(),
                   sh->need_update_refs(),
                   UseShenandoahMatrix);
  }
};
623 };
624
625
// Keep-alive closure for reference processing: marks through the visited
// reference (NONE mode -- no reference updating, no matrix updates) onto
// this worker's scan queue.
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue; // per-worker scan queue
  ShenandoahHeap* _heap;     // cached heap instance

  // Shared implementation for both narrow and wide oops.
  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, false>(p, _heap, _queue, NULL);
  }

public:
  ShenandoahCMKeepAliveClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p) { do_oop_nv(p); }
};
643
// Keep-alive closure for reference processing when references also need
// updating: marks through the visited reference in SIMPLE mode (no
// matrix updates) onto this worker's scan queue.
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  SCMObjToScanQueue* _queue; // per-worker scan queue
  ShenandoahHeap* _heap;     // cached heap instance

  // Shared implementation for both narrow and wide oops.
  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, false>(p, _heap, _queue, NULL);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(SCMObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p) { do_oop_nv(p); }
};
661
662 class ShenandoahCMKeepAliveMatrixClosure : public OopClosure {
663 private:
664 SCMObjToScanQueue* _queue;
665 ShenandoahHeap* _heap;
666 ShenandoahConnectionMatrix* _conn_matrix;
667
668 template <class T>
669 inline void do_oop_nv(T* p) {
670 ShenandoahConcurrentMark::mark_through_ref<T, NONE, true>(p, _heap, _queue, _conn_matrix);
671 }
672
673 public:
674 ShenandoahCMKeepAliveMatrixClosure(SCMObjToScanQueue* q) :
675 _queue(q), _heap(ShenandoahHeap::heap()),
676 _conn_matrix(ShenandoahHeap::heap()->connection_matrix()) {};
677
678 void do_oop(narrowOop* p) { do_oop_nv(p); }
679 void do_oop(oop* p) { do_oop_nv(p); }
680 };
681
682 class ShenandoahCMKeepAliveUpdateMatrixClosure : public OopClosure {
683 private:
684 SCMObjToScanQueue* _queue;
685 ShenandoahHeap* _heap;
686 ShenandoahConnectionMatrix* _conn_matrix;
687
688 template <class T>
689 inline void do_oop_nv(T* p) {
690 ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, true>(p, _heap, _queue, _conn_matrix);
691 }
692
693 public:
694 ShenandoahCMKeepAliveUpdateMatrixClosure(SCMObjToScanQueue* q) :
695 _queue(q), _heap(ShenandoahHeap::heap()),
696 _conn_matrix(ShenandoahHeap::heap()->connection_matrix()) {};
697
698 void do_oop(narrowOop* p) { do_oop_nv(p); }
699 void do_oop(oop* p) { do_oop_nv(p); }
700 };
701
// Gang-task adapter that runs a reference-processing ProcessTask on each
// worker, wiring in Shenandoah's is-alive, keep-alive and drain closures.
// The keep-alive flavor is chosen on two axes: whether the connection
// matrix is maintained, and whether references need updating.
class ShenandoahRefProcTaskProxy : public AbstractGangTask {

private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;
public:

  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (UseShenandoahMatrix) {
      // Matrix-maintaining keep-alive closures.
      if (heap->need_update_refs()) {
        ShenandoahCMKeepAliveUpdateMatrixClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahCMKeepAliveMatrixClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    } else {
      // Plain keep-alive closures, no matrix bookkeeping.
      if (heap->need_update_refs()) {
        ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahCMKeepAliveClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    }
  }
};
740
// Gang-task adapter that lets the work gang run a reference EnqueueTask
// on each worker; the proxy merely forwards the worker id.
class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {

private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:

  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};
757
758 class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
853
854 // Clean up marking stacks.
855 SCMObjToScanQueueSet* queues = task_queues();
856 queues->clear();
857
858 // Cancel SATB buffers.
859 JavaThread::satb_mark_queue_set().abandon_partial_marking();
860 }
861
// Returns the scan queue reserved for the given worker. Callers must
// have reserved at least worker_id+1 queues beforehand.
SCMObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
  return _task_queues->queue(worker_id);
}
866
// Discards all pending entries of a scan queue: the queue proper, its
// overflow stack, and its internal buffer.
void ShenandoahConcurrentMark::clear_queue(SCMObjToScanQueue *q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}
872
// Per-worker entry into the marking loop: the template flags select the
// concrete marking closure type (matrix-aware or not, metadata-aware or
// not, ref-updating or not), then mark_loop_work runs with it. Liveness
// is accumulated into a per-worker array (one jushort counter per
// region) and flushed to the regions afterwards, avoiding contended
// per-region updates during marking.
template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS, bool CLASS_UNLOAD, bool UPDATE_REFS, bool UPDATE_MATRIX>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp) {
  SCMObjToScanQueue* q = get_queue(w);

  // Reset this worker's liveness counters, if liveness is tracked.
  jushort* ld;
  if (COUNT_LIVENESS) {
    ld = get_liveness(w);
    Copy::fill_to_bytes(ld, _heap->max_regions() * sizeof(jushort));
  } else {
    ld = NULL;
  }

  // TODO: We can clean up this if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (UPDATE_MATRIX) {
    if (CLASS_UNLOAD) {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsMetadataMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    } else {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMatrixClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMatrixClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    }
  } else {
    if (CLASS_UNLOAD) {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    } else {
      if (UPDATE_REFS) {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    }
  }
  // Flush this worker's liveness counts into the regions' live data.
  if (COUNT_LIVENESS) {
    for (uint i = 0; i < _heap->max_regions(); i++) {
      ShenandoahHeapRegion *r = _heap->regions()->get(i);
      if (r != NULL) {
        jushort live = ld[i];
        if (live > 0) {
          r->increase_live_data_words(live);
        }
      }
    }
  }
}
936
937 template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
938 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
939 int seed = 17;
940 uint stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;
941
942 ShenandoahHeap* heap = ShenandoahHeap::heap();
943 SCMObjToScanQueueSet* queues = task_queues();
|