
src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp

rev 10493 : [Backport] Shenandoah string deduplication
rev 10496 : [backport] Rename "cancel_concgc" to "cancel_gc"
rev 10504 : [backport] Full GC always comes with liveness data
rev 10531 : [backport] Improve scheduling and interleaving of SATB processing in mark loop
rev 10536 : [backport] Process remaining SATB buffers in final mark/traverse loop instead of separate phase
rev 10546 : [backport] Wrap worker id in thread local worker session
rev 10547 : [backport] Non-cancellable mark loops should have sensible stride
rev 10554 : [backport] Cleanup UseShenandoahOWST blocks
rev 10561 : [backport] Add task termination and enhanced task queue state tracking + weakrefs
rev 10574 : [backport] Print task queue statistics at the end of GC cycle
rev 10581 : [backport] Refactor alive-closures to deal better with new marking contexts
rev 10582 : [backport] Avoid indirection to next-mark-context
rev 10589 : [backport] Purge support for ShenandoahConcurrentEvacCodeRoots and ShenandoahBarriersForConst
rev 10613 : [backport] Remove obsolete/unused logging usages
rev 10625 : [backport] Soft refs should be purged reliably on allocation failure, or with compact heuristics

*** 45,74 ****
  template<UpdateRefsMode UPDATE_REFS>
  class ShenandoahInitMarkRootsClosure : public OopClosure {
  private:
    ShenandoahObjToScanQueue* _queue;
    ShenandoahHeap* _heap;

    template <class T>
    inline void do_oop_nv(T* p) {
!     ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS>(p, _heap, _queue);
    }

  public:
    ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
!     _queue(q), _heap(ShenandoahHeap::heap()) {};

    void do_oop(narrowOop* p) { do_oop_nv(p); }
    void do_oop(oop* p)       { do_oop_nv(p); }
  };

  ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
    MetadataAwareOopClosure(rp),
    _queue(q),
!   _heap((ShenandoahHeap*) Universe::heap())
! {
! }

  template<UpdateRefsMode UPDATE_REFS>
  class ShenandoahInitMarkRootsTask : public AbstractGangTask {
  private:
    ShenandoahRootProcessor* _rp;
--- 45,88 ----
  template<UpdateRefsMode UPDATE_REFS>
  class ShenandoahInitMarkRootsClosure : public OopClosure {
  private:
    ShenandoahObjToScanQueue* _queue;
    ShenandoahHeap* _heap;
+   ShenandoahMarkingContext* const _mark_context;

    template <class T>
    inline void do_oop_nv(T* p) {
!     ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, false /* string dedup */>(p, _heap, _queue, _mark_context);
    }

  public:
    ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
!     _queue(q),
!     _heap(ShenandoahHeap::heap()),
!     _mark_context(_heap->next_marking_context()) {};

    void do_oop(narrowOop* p) { do_oop_nv(p); }
    void do_oop(oop* p)       { do_oop_nv(p); }
  };

  ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
    MetadataAwareOopClosure(rp),
    _queue(q),
!   _dedup_queue(NULL),
!   _heap(ShenandoahHeap::heap()),
!   _mark_context(_heap->next_marking_context())
! { }
!
!
! ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) :
!   MetadataAwareOopClosure(rp),
!   _queue(q),
!   _dedup_queue(dq),
!   _heap(ShenandoahHeap::heap()),
!   _mark_context(_heap->next_marking_context())
! { }
!
  template<UpdateRefsMode UPDATE_REFS>
  class ShenandoahInitMarkRootsTask : public AbstractGangTask {
  private:
    ShenandoahRootProcessor* _rp;
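The recurring change in this hunk, and in the closure hunks below, is that every marking closure now caches a ShenandoahMarkingContext* const member at construction instead of re-resolving the next-mark context on every oop (rev 10582, "Avoid indirection to next-mark-context"). The following is a standalone sketch of that hoisting pattern, using simplified stand-in types rather than the HotSpot classes:

    #include <cstdio>

    // Stand-in for the heap's marking context: one bitmap-like "mark" op.
    struct MarkingContext {
      bool mark(void* obj) { std::printf("marked %p\n", obj); return true; }
    };

    struct Heap {
      MarkingContext ctx;
      static Heap* heap() { static Heap h; return &h; }
      MarkingContext* next_marking_context() { return &ctx; }
    };

    // The closure resolves the context once; the per-oop hot path then
    // avoids a heap->next_marking_context() load on every invocation.
    class MarkClosure {
    private:
      Heap* const _heap;
      MarkingContext* const _mark_context; // hoisted out of do_oop
    public:
      MarkClosure()
        : _heap(Heap::heap()),
          _mark_context(_heap->next_marking_context()) {}
      void do_oop(void** p) {
        if (*p != nullptr) {
          _mark_context->mark(*p); // no indirection through the heap here
        }
      }
    };

    int main() {
      int dummy;
      void* obj = &dummy;
      MarkClosure cl;
      cl.do_oop(&obj);
      return 0;
    }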
*** 80,89 ****
--- 94,104 ----
      _process_refs(process_refs) {
    }

    void work(uint worker_id) {
      assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+     ShenandoahWorkerSession worker_session(worker_id);
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      ShenandoahObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
      assert(queues->get_reserved() > worker_id, err_msg("Queue has not been reserved for worker id: %d", worker_id));
*** 114,124 ****
  #ifdef ASSERT
      ShenandoahAssertToSpaceClosure assert_to_space_oops;
      CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
      // If conc code cache evac is disabled, code cache should have only to-space ptrs.
      // Otherwise, it should have to-space ptrs only if mark does not update refs.
!     if (!ShenandoahConcurrentEvacCodeRoots && !heap->has_forwarded_objects()) {
        code_blobs = &assert_to_space;
      }
  #endif
      _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, NULL, worker_id);
    } else {
--- 129,139 ----
  #ifdef ASSERT
      ShenandoahAssertToSpaceClosure assert_to_space_oops;
      CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
      // If conc code cache evac is disabled, code cache should have only to-space ptrs.
      // Otherwise, it should have to-space ptrs only if mark does not update refs.
!     if (!heap->has_forwarded_objects()) {
        code_blobs = &assert_to_space;
      }
  #endif
      _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, NULL, worker_id);
    } else {
*** 139,148 ****
--- 154,164 ----
      _update_code_cache(update_code_cache) {
    }

    void work(uint worker_id) {
      assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+     ShenandoahWorkerSession worker_session(worker_id);
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      ShenandoahUpdateRefsClosure cl;
      CLDToOopClosure cldCl(&cl);
*** 174,183 ****
--- 190,200 ----
      AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
    }

    void work(uint worker_id) {
+     ShenandoahWorkerSession worker_session(worker_id);
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      jushort* live_data = _cm->get_liveness(worker_id);
      ReferenceProcessor* rp;
      if (_cm->process_references()) {
        rp = ShenandoahHeap::heap()->ref_processor();
*** 187,223 ****
      }
      _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
      _cm->mark_loop(worker_id, _terminator, rp,
                     true, // cancellable
-                    true, // drain SATBs as we go
-                    true, // count liveness
                     _cm->unload_classes(),
!                    _update_refs);
    }
  };

  class ShenandoahFinalMarkingTask : public AbstractGangTask {
  private:
    ShenandoahConcurrentMark* _cm;
    ParallelTaskTerminator* _terminator;
    bool _update_refs;
-   bool _count_live;
    bool _unload_classes;

  public:
!   ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live, bool unload_classes) :
!     AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live), _unload_classes(unload_classes) {
    }

    void work(uint worker_id) {
      // First drain remaining SATB buffers.
      // Notice that this is not strictly necessary for mark-compact. But since
      // it requires a StrongRootsScope around the task, we need to claim the
      // threads, and performance-wise it doesn't really matter. Adds about 1ms to
      // full-gc.
!     _cm->drain_satb_buffers(worker_id, true);
      ReferenceProcessor* rp;
      if (_cm->process_references()) {
        rp = ShenandoahHeap::heap()->ref_processor();
        shenandoah_assert_rp_isalive_installed();
--- 204,271 ----
      }
      _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
      _cm->mark_loop(worker_id, _terminator, rp,
                     true, // cancellable
                     _cm->unload_classes(),
!                    _update_refs,
!                    ShenandoahStringDedup::is_enabled()); // perform string dedup
!   }
! };
!
! class ShenandoahSATBThreadsClosure : public ThreadClosure {
!   ShenandoahSATBBufferClosure* _satb_cl;
!   int _thread_parity;
!
! public:
!   ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
!     _satb_cl(satb_cl),
!     _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}
!
!   void do_thread(Thread* thread) {
!     if (thread->is_Java_thread()) {
!       if (thread->claim_oops_do(true, _thread_parity)) {
!         JavaThread* jt = (JavaThread*)thread;
!         jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
!       }
!     } else if (thread->is_VM_thread()) {
!       if (thread->claim_oops_do(true, _thread_parity)) {
!         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
!       }
!     }
    }
  };

  class ShenandoahFinalMarkingTask : public AbstractGangTask {
  private:
    ShenandoahConcurrentMark* _cm;
    ParallelTaskTerminator* _terminator;
    bool _update_refs;
    bool _unload_classes;
+   bool _dedup_string;

  public:
!   ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator,
!                              bool update_refs, bool unload_classes, bool dedup_string) :
!     AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator),
!     _update_refs(update_refs), _unload_classes(unload_classes), _dedup_string(dedup_string) {
    }

    void work(uint worker_id) {
      // First drain remaining SATB buffers.
      // Notice that this is not strictly necessary for mark-compact. But since
      // it requires a StrongRootsScope around the task, we need to claim the
      // threads, and performance-wise it doesn't really matter. Adds about 1ms to
      // full-gc.
!     {
!       ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
!       ShenandoahSATBBufferClosure cl(q);
!       SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
!       while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
!       ShenandoahSATBThreadsClosure tc(&cl);
!       Threads::threads_do(&tc);
!     }
      ReferenceProcessor* rp;
      if (_cm->process_references()) {
        rp = ShenandoahHeap::heap()->ref_processor();
        shenandoah_assert_rp_isalive_installed();
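ShenandoahSATBThreadsClosure (moved up here from its old location further down the file) relies on HotSpot's thread-claim protocol: each STW phase bumps a global parity, and each thread is drained by exactly the one worker that first swings the thread's claim token to the current parity, even when several workers visit the same thread. A minimal standalone analog of that protocol using C++ atomics (the names are illustrative, not HotSpot's):

    #include <atomic>
    #include <cstdio>

    static int g_parity = 1; // bumped once per STW phase (single-threaded setup)

    struct ThreadStub {
      std::atomic<int> claim{0};
      // Returns true for exactly one claimant per parity value, even if
      // several GC workers race on the same thread.
      bool claim_oops_do(int parity) {
        int seen = claim.load(std::memory_order_relaxed);
        if (seen == parity) return false;          // already claimed this phase
        return claim.compare_exchange_strong(seen, parity);
      }
    };

    int main() {
      ThreadStub t;
      std::printf("first claim:  %d\n", t.claim_oops_do(g_parity));  // 1
      std::printf("second claim: %d\n", t.claim_oops_do(g_parity));  // 0
      g_parity++; // next phase re-arms the claim
      std::printf("next phase:   %d\n", t.claim_oops_do(g_parity));  // 1
      return 0;
    }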
*** 228,241 ****
      // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
      // let's check here.
      _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
      _cm->mark_loop(worker_id, _terminator, rp,
                     false, // not cancellable
-                    false, // do not drain SATBs, already drained
-                    _count_live,
                     _unload_classes,
!                    _update_refs);

      assert(_cm->task_queues()->is_empty(), "Should be empty");
    }
  };
--- 276,288 ----
      // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
      // let's check here.
      _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
      _cm->mark_loop(worker_id, _terminator, rp,
                     false, // not cancellable
                     _unload_classes,
!                    _update_refs,
!                    _dedup_string);

      assert(_cm->task_queues()->is_empty(), "Should be empty");
    }
  };
*** 251,261 ****
    uint nworkers = workers->active_workers();

    assert(nworkers <= task_queues()->size(), "Just check");

    ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
!   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
    task_queues()->reserve(nworkers);

    if (heap->has_forwarded_objects()) {
      ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, process_references());
      workers->run_task(&mark_roots);
--- 298,308 ----
    uint nworkers = workers->active_workers();

    assert(nworkers <= task_queues()->size(), "Just check");

    ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
!   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
    task_queues()->reserve(nworkers);

    if (heap->has_forwarded_objects()) {
      ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, process_references());
      workers->run_task(&mark_roots);
*** 283,294 ****
    bool update_code_cache = true; // initialize to safer value
    switch (root_phase) {
      case ShenandoahPhaseTimings::update_roots:
      case ShenandoahPhaseTimings::final_update_refs_roots:
!       // If code cache was evacuated concurrently, we need to update code cache roots.
!       update_code_cache = ShenandoahConcurrentEvacCodeRoots;
        break;
      case ShenandoahPhaseTimings::full_gc_roots:
        update_code_cache = true;
        break;
      default:
--- 330,340 ----
    bool update_code_cache = true; // initialize to safer value
    switch (root_phase) {
      case ShenandoahPhaseTimings::update_roots:
      case ShenandoahPhaseTimings::final_update_refs_roots:
!       update_code_cache = false;
        break;
      case ShenandoahPhaseTimings::full_gc_roots:
        update_code_cache = true;
        break;
      default:
*** 363,412 ****
      ReferenceProcessor* rp = sh->ref_processor();
      rp->set_active_mt_degree(nworkers);

      // enable ("weak") refs discovery
      rp->enable_discovery(true /*verify_no_refs*/, true);
!     rp->setup_policy(sh->is_full_gc_in_progress()); // snapshot the soft ref policy to be used in this cycle
    }

    shenandoah_assert_rp_isalive_not_installed();
!   ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), sh->is_alive_closure());

    task_queues()->reserve(nworkers);

    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
!     ShenandoahConcurrentMarkingTask markingTask = ShenandoahConcurrentMarkingTask(this, &terminator, update_refs);
!     workers->run_task(&markingTask);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
!     ShenandoahConcurrentMarkingTask markingTask = ShenandoahConcurrentMarkingTask(this, &terminator, update_refs);
!     workers->run_task(&markingTask);
    }

!   assert(task_queues()->is_empty() || sh->cancelled_concgc(), "Should be empty when not cancelled");
!   if (! sh->cancelled_concgc()) {
!     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
    }
!   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  }

  void ShenandoahConcurrentMark::finish_mark_from_roots() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();

!   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

    shared_finish_mark_from_roots(/* full_gc = */ false);

    if (sh->has_forwarded_objects()) {
      update_roots(ShenandoahPhaseTimings::update_roots);
    }

!   TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  }

  void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
--- 409,462 ----
      ReferenceProcessor* rp = sh->ref_processor();
      rp->set_active_mt_degree(nworkers);

      // enable ("weak") refs discovery
      rp->enable_discovery(true /*verify_no_refs*/, true);
!     rp->setup_policy(sh->collector_policy()->should_clear_all_soft_refs());
    }

    shenandoah_assert_rp_isalive_not_installed();
!   ShenandoahIsAliveSelector is_alive;
!   ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), is_alive.is_alive_closure());

    task_queues()->reserve(nworkers);

+   {
+     ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
!     ShenandoahConcurrentMarkingTask task(this, &terminator, update_refs);
!     workers->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
!     ShenandoahConcurrentMarkingTask task(this, &terminator, update_refs);
!     workers->run_task(&task);
!   }
    }

!   assert(task_queues()->is_empty() || sh->cancelled_gc(), "Should be empty when not cancelled");
!   if (!sh->cancelled_gc()) {
!     TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
    }
!   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  }

  void ShenandoahConcurrentMark::finish_mark_from_roots() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();

!   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

    shared_finish_mark_from_roots(/* full_gc = */ false);

    if (sh->has_forwarded_objects()) {
      update_roots(ShenandoahPhaseTimings::update_roots);
    }

!   TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  }

  void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
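The new ShenandoahTerminationTracker blocks added here and below come from rev 10561 and appear to be scoped (RAII) timers: construction marks entry into the termination-prone region, destruction charges the elapsed time to the named phase. A sketch of that idiom against a hypothetical phase table, not the actual ShenandoahPhaseTimings API:

    #include <chrono>
    #include <cstdio>

    enum Phase { CONC_TERMINATION, FULL_GC_MARK_TERMINATION, NUM_PHASES };
    static double g_phase_seconds[NUM_PHASES]; // accumulated per-phase time

    class TerminationTracker {
      Phase _phase;
      std::chrono::steady_clock::time_point _start;
    public:
      explicit TerminationTracker(Phase p)
        : _phase(p), _start(std::chrono::steady_clock::now()) {}
      ~TerminationTracker() {
        std::chrono::duration<double> d = std::chrono::steady_clock::now() - _start;
        g_phase_seconds[_phase] += d.count(); // charged on scope exit
      }
    };

    int main() {
      {
        TerminationTracker term(CONC_TERMINATION);
        // ... run the marking task; everything until the brace closes is charged ...
      }
      std::printf("conc termination: %.6fs\n", g_phase_seconds[CONC_TERMINATION]);
      return 0;
    }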
*** 423,446 ****
    // The implementation is the same, so it's shared here.
    {
      ShenandoahGCPhase phase(full_gc ?
                              ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                              ShenandoahPhaseTimings::finish_queues);
-     bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
      task_queues()->reserve(nworkers);

      shenandoah_assert_rp_isalive_not_installed();
!     ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), sh->is_alive_closure());

      SharedHeap::StrongRootsScope scope(sh, true);
      if (UseShenandoahOWST) {
        ShenandoahTaskTerminator terminator(nworkers, task_queues());
!       ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(), count_live, unload_classes());
        sh->workers()->run_task(&task);
      } else {
        ParallelTaskTerminator terminator(nworkers, task_queues());
!       ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(), count_live, unload_classes());
        sh->workers()->run_task(&task);
      }
    }

    assert(task_queues()->is_empty(), "Should be empty");
--- 473,502 ----
    // The implementation is the same, so it's shared here.
    {
      ShenandoahGCPhase phase(full_gc ?
                              ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                              ShenandoahPhaseTimings::finish_queues);
      task_queues()->reserve(nworkers);

      shenandoah_assert_rp_isalive_not_installed();
!     ShenandoahIsAliveSelector is_alive;
!     ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), is_alive.is_alive_closure());
!
!     ShenandoahTerminationTracker termination_tracker(full_gc ?
!                                                      ShenandoahPhaseTimings::full_gc_mark_termination :
!                                                      ShenandoahPhaseTimings::termination);

      SharedHeap::StrongRootsScope scope(sh, true);
      if (UseShenandoahOWST) {
        ShenandoahTaskTerminator terminator(nworkers, task_queues());
!       ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(),
!                                       unload_classes(), full_gc && ShenandoahStringDedup::is_enabled());
        sh->workers()->run_task(&task);
      } else {
        ParallelTaskTerminator terminator(nworkers, task_queues());
!       ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(),
!                                       unload_classes(), full_gc && ShenandoahStringDedup::is_enabled());
        sh->workers()->run_task(&task);
      }
    }

    assert(task_queues()->is_empty(), "Should be empty");
*** 454,538 ****
    if (unload_classes()) {
      sh->unload_classes_and_cleanup_tables(full_gc);
    }

    assert(task_queues()->is_empty(), "Should be empty");
! }

- class ShenandoahSATBThreadsClosure : public ThreadClosure {
-   ShenandoahSATBBufferClosure* _satb_cl;
-   int _thread_parity;
-
- public:
-   ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
-     _satb_cl(satb_cl),
-     _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}
-
-   void do_thread(Thread* thread) {
-     if (thread->is_Java_thread()) {
-       if (thread->claim_oops_do(true, _thread_parity)) {
-         JavaThread* jt = (JavaThread*)thread;
-         jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
-       }
-     } else if (thread->is_VM_thread()) {
-       if (thread->claim_oops_do(true, _thread_parity)) {
-         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
-       }
-     }
-   }
- };
-
- void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
-   ShenandoahObjToScanQueue* q = get_queue(worker_id);
-   ShenandoahSATBBufferClosure cl(q);
-
-   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-   while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
-
-   if (remark) {
-     ShenandoahSATBThreadsClosure tc(&cl);
-     Threads::threads_do(&tc);
-   }
- }
-
- #if TASKQUEUE_STATS
- void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
-   st->print_raw_cr("GC Task Stats");
-   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
-   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
- }
-
- void ShenandoahConcurrentMark::print_taskqueue_stats() const {
-   if (! ShenandoahLogTrace) {
-     return;
-   }
-   ResourceMark rm;
-   outputStream* st = gclog_or_tty;
-   print_taskqueue_stats_hdr(st);
-
-   TaskQueueStats totals;
-   const uint n = _task_queues->size();
-   for (uint i = 0; i < n; ++i) {
-     st->print(UINT32_FORMAT_W(3), i);
-     _task_queues->queue(i)->stats.print(st);
-     st->cr();
-     totals += _task_queues->queue(i)->stats;
-   }
-   st->print("tot "); totals.print(st); st->cr();
-   DEBUG_ONLY(totals.verify());
-
- }
-
- void ShenandoahConcurrentMark::reset_taskqueue_stats() {
-   const uint n = task_queues()->size();
-   for (uint i = 0; i < n; ++i) {
-     task_queues()->queue(i)->stats.reset();
-   }
- }
- #endif // TASKQUEUE_STATS
-
  // Weak Reference Closures
  class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
    uint _worker_id;
    ParallelTaskTerminator* _terminator;
    bool _reset_terminator;
--- 510,523 ----
    if (unload_classes()) {
      sh->unload_classes_and_cleanup_tables(full_gc);
    }

    assert(task_queues()->is_empty(), "Should be empty");
!   TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
!   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  }

  // Weak Reference Closures
  class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
    uint _worker_id;
    ParallelTaskTerminator* _terminator;
    bool _reset_terminator;
*** 554,567 ****
      shenandoah_assert_rp_isalive_installed();

      scm->mark_loop(_worker_id, _terminator, rp,
                     false, // not cancellable
-                    false, // do not drain SATBs
-                    true,  // count liveness
                     scm->unload_classes(),
!                    sh->has_forwarded_objects());

      if (_reset_terminator) {
        _terminator->reset_for_reuse();
      }
    }
--- 539,551 ----
      shenandoah_assert_rp_isalive_installed();

      scm->mark_loop(_worker_id, _terminator, rp,
                     false, // not cancellable
                     scm->unload_classes(),
!                    sh->has_forwarded_objects(),
!                    false); // do not do strdedup

      if (_reset_terminator) {
        _terminator->reset_for_reuse();
      }
    }
*** 570,606 ****
  class ShenandoahCMKeepAliveClosure : public OopClosure {
  private:
    ShenandoahObjToScanQueue* _queue;
    ShenandoahHeap* _heap;

    template <class T>
    inline void do_oop_nv(T* p) {
!     ShenandoahConcurrentMark::mark_through_ref<T, NONE>(p, _heap, _queue);
    }

  public:
    ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
!     _queue(q), _heap(ShenandoahHeap::heap()) {};

    void do_oop(narrowOop* p) { do_oop_nv(p); }
    void do_oop(oop* p)       { do_oop_nv(p); }
  };

  class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
  private:
    ShenandoahObjToScanQueue* _queue;
    ShenandoahHeap* _heap;

    template <class T>
    inline void do_oop_nv(T* p) {
!     ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE>(p, _heap, _queue);
    }

  public:
    ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
!     _queue(q), _heap(ShenandoahHeap::heap()) {};

    void do_oop(narrowOop* p) { do_oop_nv(p); }
    void do_oop(oop* p)       { do_oop_nv(p); }
  };
--- 554,596 ----
  class ShenandoahCMKeepAliveClosure : public OopClosure {
  private:
    ShenandoahObjToScanQueue* _queue;
    ShenandoahHeap* _heap;
+   ShenandoahMarkingContext* const _mark_context;

    template <class T>
    inline void do_oop_nv(T* p) {
!     ShenandoahConcurrentMark::mark_through_ref<T, NONE, false /* string dedup */>(p, _heap, _queue, _mark_context);
    }

  public:
    ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
!     _queue(q),
!     _heap(ShenandoahHeap::heap()),
!     _mark_context(_heap->next_marking_context()) {}

    void do_oop(narrowOop* p) { do_oop_nv(p); }
    void do_oop(oop* p)       { do_oop_nv(p); }
  };

  class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
  private:
    ShenandoahObjToScanQueue* _queue;
    ShenandoahHeap* _heap;
+   ShenandoahMarkingContext* const _mark_context;

    template <class T>
    inline void do_oop_nv(T* p) {
!     ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, false /* string dedup */>(p, _heap, _queue, _mark_context);
    }

  public:
    ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
!     _queue(q),
!     _heap(ShenandoahHeap::heap()),
!     _mark_context(_heap->next_marking_context()) {}

    void do_oop(narrowOop* p) { do_oop_nv(p); }
    void do_oop(oop* p)       { do_oop_nv(p); }
  };
*** 728,747 ****
    ShenandoahPhaseTimings::Phase phase_enqueue = full_gc ?
                                                  ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
                                                  ShenandoahPhaseTimings::weakrefs_enqueue;

    shenandoah_assert_rp_isalive_not_installed();
!   ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

    WorkGang* workers = sh->workers();
    uint nworkers = workers->active_workers();

!   // Setup collector policy for softref cleaning.
!   bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
!   log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
!   rp->setup_policy(clear_soft_refs);
    rp->set_active_mt_degree(nworkers);

    assert(task_queues()->is_empty(), "Should be empty");

    // complete_gc and keep_alive closures instantiated here are only needed for
--- 718,740 ----
    ShenandoahPhaseTimings::Phase phase_enqueue = full_gc ?
                                                  ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
                                                  ShenandoahPhaseTimings::weakrefs_enqueue;

+   ShenandoahPhaseTimings::Phase phase_process_termination =
+                                                 full_gc ?
+                                                 ShenandoahPhaseTimings::full_gc_weakrefs_termination :
+                                                 ShenandoahPhaseTimings::weakrefs_termination;
+
    shenandoah_assert_rp_isalive_not_installed();
!   ShenandoahIsAliveSelector is_alive;
!   ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

    WorkGang* workers = sh->workers();
    uint nworkers = workers->active_workers();

!   rp->setup_policy(sh->collector_policy()->should_clear_all_soft_refs());
    rp->set_active_mt_degree(nworkers);

    assert(task_queues()->is_empty(), "Should be empty");

    // complete_gc and keep_alive closures instantiated here are only needed for
*** 754,763 ****
--- 747,757 ----
    ShenandoahRefProcTaskExecutor executor(workers);

    {
      ShenandoahGCPhase phase(phase_process);
+     ShenandoahTerminationTracker phase_term(phase_process_termination);

      if (sh->has_forwarded_objects()) {
        ShenandoahForwardedIsAliveClosure is_alive;
        ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
*** 783,793 ****
  class ShenandoahCancelledGCYieldClosure : public YieldClosure {
  private:
    ShenandoahHeap* const _heap;
  public:
    ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
!   virtual bool should_return() { return _heap->cancelled_concgc(); }
  };

  class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
  public:
    void do_void() {
--- 777,787 ----
  class ShenandoahCancelledGCYieldClosure : public YieldClosure {
  private:
    ShenandoahHeap* const _heap;
  public:
    ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
!   virtual bool should_return() { return _heap->cancelled_gc(); }
  };

  class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
  public:
    void do_void() {
*** 799,828 ****
      ReferenceProcessor* rp = sh->ref_processor();
      shenandoah_assert_rp_isalive_installed();

      scm->mark_loop(0, &terminator, rp,
                     false, // not cancellable
-                    true, // drain SATBs
-                    true, // count liveness
                     scm->unload_classes(),
!                    sh->has_forwarded_objects());
    }
  };

  class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
  private:
    ShenandoahObjToScanQueue* _queue;
    ShenandoahHeap* _heap;

    template <class T>
    inline void do_oop_nv(T* p) {
!     ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT>(p, _heap, _queue);
    }

  public:
    ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
!     _queue(q), _heap(ShenandoahHeap::heap()) {}

    void do_oop(narrowOop* p) { do_oop_nv(p); }
    void do_oop(oop* p)       { do_oop_nv(p); }
  };
--- 793,824 ----
      ReferenceProcessor* rp = sh->ref_processor();
      shenandoah_assert_rp_isalive_installed();

      scm->mark_loop(0, &terminator, rp,
                     false, // not cancellable
                     scm->unload_classes(),
!                    sh->has_forwarded_objects(),
!                    false); // do not do strdedup
    }
  };

  class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
  private:
    ShenandoahObjToScanQueue* _queue;
    ShenandoahHeap* _heap;
+   ShenandoahMarkingContext* const _mark_context;

    template <class T>
    inline void do_oop_nv(T* p) {
!     ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, false /* string dedup */>(p, _heap, _queue, _mark_context);
    }

  public:
    ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
!     _queue(q),
!     _heap(ShenandoahHeap::heap()),
!     _mark_context(_heap->next_marking_context()) {}

    void do_oop(narrowOop* p) { do_oop_nv(p); }
    void do_oop(oop* p)       { do_oop_nv(p); }
  };
*** 839,849 ****
    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ReferenceProcessor* rp = sh->ref_processor();
    ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

    shenandoah_assert_rp_isalive_not_installed();
!   ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

    // Interrupt on cancelled GC
    ShenandoahCancelledGCYieldClosure yield;

    assert(task_queues()->is_empty(), "Should be empty");
--- 835,846 ----
    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ReferenceProcessor* rp = sh->ref_processor();
    ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

    shenandoah_assert_rp_isalive_not_installed();
!   ShenandoahIsAliveSelector is_alive;
!   ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

    // Interrupt on cancelled GC
    ShenandoahCancelledGCYieldClosure yield;

    assert(task_queues()->is_empty(), "Should be empty");
*** 886,942 ****
    q->set_empty();
    q->overflow_stack()->clear();
    q->clear_buffer();
  }

! template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
! void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp, bool class_unload, bool update_refs) {
    ShenandoahObjToScanQueue* q = get_queue(w);

!   jushort* ld;
!   if (COUNT_LIVENESS) {
!     ld = get_liveness(w);
      Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));
-   } else {
-     ld = NULL;
-   }

    // TODO: We can clean up this if we figure out how to do templated oop closures that
    // play nice with specialized_oop_iterators.
    if (class_unload) {
      if (update_refs) {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
!       mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
!       mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    } else {
      if (update_refs) {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
!       mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
!       mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    }

-   if (COUNT_LIVENESS) {
      for (uint i = 0; i < _heap->num_regions(); i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        jushort live = ld[i];
        if (live > 0) {
          r->increase_live_data_gc_words(live);
        }
      }
-   }
  }

! template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
  void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
    int seed = 17;
!   uintx stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = task_queues();
    ShenandoahObjToScanQueue* q;
    ShenandoahMarkTask t;
--- 883,957 ----
    q->set_empty();
    q->overflow_stack()->clear();
    q->clear_buffer();
  }

! template <bool CANCELLABLE>
! void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp,
!                                                  bool class_unload, bool update_refs, bool strdedup) {
    ShenandoahObjToScanQueue* q = get_queue(w);

!   jushort* ld = get_liveness(w);
    Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));

    // TODO: We can clean up this if we figure out how to do templated oop closures that
    // play nice with specialized_oop_iterators.
    if (class_unload) {
      if (update_refs) {
+       if (strdedup) {
+         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
+         ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
+         mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
+       } else {
          ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
!         mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
!       }
!     } else {
!       if (strdedup) {
!         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
!         ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
!         mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
        } else {
          ShenandoahMarkRefsMetadataClosure cl(q, rp);
!         mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
!       }
      }
    } else {
      if (update_refs) {
+       if (strdedup) {
+         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
+         ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
+         mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
+       } else {
          ShenandoahMarkUpdateRefsClosure cl(q, rp);
!         mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
!       }
!     } else {
!       if (strdedup) {
!         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
!         ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
!         mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
        } else {
          ShenandoahMarkRefsClosure cl(q, rp);
!         mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
!       }
      }
    }

    for (uint i = 0; i < _heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = _heap->get_region(i);
      jushort live = ld[i];
      if (live > 0) {
        r->increase_live_data_gc_words(live);
      }
    }
  }

! template <class T, bool CANCELLABLE>
  void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
    int seed = 17;
!   uintx stride = ShenandoahMarkLoopStride;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = task_queues();
    ShenandoahObjToScanQueue* q;
    ShenandoahMarkTask t;
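mark_loop_prework resolves every mode flag (closure flavor, string dedup, cancellability) exactly once, outside the loop, and instantiates a fully specialized mark_loop_work, so the per-object hot path carries no runtime branches on these flags. The dispatch pattern in isolation, with illustrative stand-in types only:

    #include <cstdio>

    // Fully specialized hot loop: CANCELLABLE is a compile-time constant,
    // so the cancellation-poll branch is compiled out when it is false.
    template <class Closure, bool CANCELLABLE>
    static void mark_loop_work(Closure* cl, int n_tasks) {
      for (int i = 0; i < n_tasks; i++) {
        if (CANCELLABLE && (i % 1000 == 0)) {
          // poll a cancellation flag here
        }
        cl->do_task(i);
      }
    }

    struct PlainClosure { void do_task(int) {} };
    struct DedupClosure { void do_task(int) { /* also enqueue candidate for dedup */ } };

    // Runtime flags are examined once; each combination gets its own
    // instantiation of the loop.
    template <bool CANCELLABLE>
    static void mark_loop_prework(bool strdedup, int n_tasks) {
      if (strdedup) {
        DedupClosure cl;
        mark_loop_work<DedupClosure, CANCELLABLE>(&cl, n_tasks);
      } else {
        PlainClosure cl;
        mark_loop_work<PlainClosure, CANCELLABLE>(&cl, n_tasks);
      }
    }

    int main() {
      mark_loop_prework<true>(false, 10000);  // concurrent mark, cancellable
      mark_loop_prework<false>(true, 10000);  // final mark, with dedup
      std::puts("done");
      return 0;
    }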
*** 951,998 ****
    assert(queues->get_reserved() == heap->workers()->active_workers(),
           "Need to reserve proper number of queues");

    q = queues->claim_next();
    while (q != NULL) {
!     if (CANCELLABLE && heap->cancelled_concgc()) {
        ShenandoahCancelledTerminatorTerminator tt;
        while (!terminator->offer_termination(&tt));
        return;
      }

      for (uint i = 0; i < stride; i++) {
        if (try_queue(q, t)) {
!         do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
        } else {
          assert(q->is_empty(), "Must be empty");
          q = queues->claim_next();
          break;
        }
      }
    }

    q = get_queue(worker_id);

    /*
     * Normal marking loop:
     */
    while (true) {
!     if (CANCELLABLE && heap->cancelled_concgc()) {
        ShenandoahCancelledTerminatorTerminator tt;
        while (!terminator->offer_termination(&tt));
        return;
      }

      for (uint i = 0; i < stride; i++) {
        if (try_queue(q, t) ||
-           (DRAIN_SATB && try_draining_satb_buffer(q, t)) ||
            queues->steal(worker_id, &seed, t)) {
!         do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
        } else {
!         if (terminator->offer_termination()) return;
        }
      }
    }
  }

  bool ShenandoahConcurrentMark::process_references() const {
    return _heap->process_references();
--- 966,1027 ----
    assert(queues->get_reserved() == heap->workers()->active_workers(),
           "Need to reserve proper number of queues");

    q = queues->claim_next();
    while (q != NULL) {
!     if (CANCELLABLE && heap->cancelled_gc()) {
        ShenandoahCancelledTerminatorTerminator tt;
        while (!terminator->offer_termination(&tt));
        return;
      }

      for (uint i = 0; i < stride; i++) {
        if (try_queue(q, t)) {
!         do_task<T>(q, cl, live_data, &t);
        } else {
          assert(q->is_empty(), "Must be empty");
          q = queues->claim_next();
          break;
        }
      }
    }

    q = get_queue(worker_id);

+   ShenandoahSATBBufferClosure drain_satb(q);
+   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+
    /*
     * Normal marking loop:
     */
    while (true) {
!     if (CANCELLABLE && heap->cancelled_gc()) {
        ShenandoahCancelledTerminatorTerminator tt;
        while (!terminator->offer_termination(&tt));
        return;
      }

+     while (satb_mq_set.completed_buffers_num() > 0) {
+       satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
+     }
+
+     uint work = 0;
      for (uint i = 0; i < stride; i++) {
        if (try_queue(q, t) ||
            queues->steal(worker_id, &seed, t)) {
!         do_task<T>(q, cl, live_data, &t);
!         work++;
        } else {
!         break;
        }
      }
+
+     if (work == 0) {
+       // No work encountered in current stride, try to terminate.
+       ShenandoahTerminationTimingsTracker term_tracker(worker_id);
+       if (terminator->offer_termination()) return;
+     }
    }
  }

  bool ShenandoahConcurrentMark::process_references() const {
    return _heap->process_references();
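The restructured marking loop (rev 10531/10547) interleaves three activities per iteration: drain any completed SATB buffers onto the local mark queue, process up to stride tasks from the local queue or by stealing, and offer termination only after a whole stride found no work at all. A single-threaded skeleton of that control flow, with a trivial stand-in queue and terminator:

    #include <deque>
    #include <cstdio>

    int main() {
      std::deque<int> queue = {1, 2, 3, 4, 5, 6, 7};
      std::deque<int> satb;            // stand-in for completed SATB buffers
      const unsigned stride = 3;       // cf. ShenandoahMarkLoopStride
      bool terminated = false;

      while (!terminated) {
        // 1. Move SATB-reported objects onto the mark queue first, so they
        //    are interleaved with regular marking instead of a separate phase.
        while (!satb.empty()) { queue.push_back(satb.front()); satb.pop_front(); }

        // 2. Process up to one stride of tasks.
        unsigned work = 0;
        for (unsigned i = 0; i < stride; i++) {
          if (!queue.empty()) {
            int task = queue.front(); queue.pop_front();
            std::printf("marked task %d\n", task);
            work++;
          } else {
            break; // local queue empty (real code would try stealing here)
          }
        }

        // 3. Only a completely idle stride attempts termination.
        if (work == 0) {
          terminated = true; // real code: terminator->offer_termination()
        }
      }
      return 0;
    }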