src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 3952 : 8005032: G1: Cleanup serial reference processing closures in concurrent marking
Summary: Reuse the parallel reference processing oop closures during serial reference processing.
Reviewed-by:

*** 2165,2251 ****
        }
      }
    assert(tmp_free_list.is_empty(), "post-condition");
  }
  
! // Support closures for reference procssing in G1
  
  bool G1CMIsAliveClosure::do_object_b(oop obj) {
    HeapWord* addr = (HeapWord*)obj;
    return addr != NULL &&
           (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
  }
  
! class G1CMKeepAliveClosure: public ExtendedOopClosure {
!   G1CollectedHeap* _g1;
!   ConcurrentMark*  _cm;
!  public:
!   G1CMKeepAliveClosure(G1CollectedHeap* g1, ConcurrentMark* cm) :
!     _g1(g1), _cm(cm) {
!     assert(Thread::current()->is_VM_thread(), "otherwise fix worker id");
!   }
! 
!   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
!   virtual void do_oop(      oop* p) { do_oop_work(p); }
! 
!   template <class T> void do_oop_work(T* p) {
!     oop obj = oopDesc::load_decode_heap_oop(p);
!     HeapWord* addr = (HeapWord*)obj;
! 
!     if (_cm->verbose_high()) {
!       gclog_or_tty->print_cr("\t[0] we're looking at location "
!                              "*"PTR_FORMAT" = "PTR_FORMAT,
!                              p, (void*) obj);
!     }
! 
!     if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
!       _cm->mark_and_count(obj);
!       _cm->mark_stack_push(obj);
!     }
!   }
! };
! 
! class G1CMDrainMarkingStackClosure: public VoidClosure {
!   ConcurrentMark*       _cm;
!   CMMarkStack*          _markStack;
!   G1CMKeepAliveClosure* _oopClosure;
!  public:
!   G1CMDrainMarkingStackClosure(ConcurrentMark* cm, CMMarkStack* markStack,
!                                G1CMKeepAliveClosure* oopClosure) :
!     _cm(cm),
!     _markStack(markStack),
!     _oopClosure(oopClosure) { }
! 
!   void do_void() {
!     _markStack->drain(_oopClosure, _cm->nextMarkBitMap(), false);
!   }
! };
  
- // 'Keep Alive' closure used by parallel reference processing.
- // An instance of this closure is used in the parallel reference processing
- // code rather than an instance of G1CMKeepAliveClosure. We could have used
- // the G1CMKeepAliveClosure as it is MT-safe. Also reference objects are
- // placed on to discovered ref lists once so we can mark and push with no
- // need to check whether the object has already been marked. Using the
- // G1CMKeepAliveClosure would mean, however, having all the worker threads
- // operating on the global mark stack. This means that an individual
- // worker would be doing lock-free pushes while it processes its own
- // discovered ref list followed by drain call. If the discovered ref lists
- // are unbalanced then this could cause interference with the other
- // workers. Using a CMTask (and its embedded local data structures)
- // avoids that potential interference.
  
  class G1CMParKeepAliveAndDrainClosure: public OopClosure {
    ConcurrentMark*  _cm;
    CMTask*          _task;
    int              _ref_counter_limit;
    int              _ref_counter;
   public:
!   G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
!     _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval) {
      assert(_ref_counter_limit > 0, "sanity");
      _ref_counter = _ref_counter_limit;
    }
  
    virtual void do_oop(narrowOop* p) { do_oop_work(p); }
    virtual void do_oop(      oop* p) { do_oop_work(p); }
--- 2165,2206 ----
        }
      }
    assert(tmp_free_list.is_empty(), "post-condition");
  }
  
! // Supporting Object and Oop closures for reference discovery
! // and processing during marking
  
  bool G1CMIsAliveClosure::do_object_b(oop obj) {
    HeapWord* addr = (HeapWord*)obj;
    return addr != NULL &&
           (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
  }
  
! // 'Keep Alive' oop closure used by both serial and parallel reference processing.
! // Uses the CMTask associated with a worker thread (for serial reference
! // processing the CMTask for worker 0 is used) to preserve (mark) and
! // trace referent objects.
! //
! // Using the CMTask and embedded local queues avoids having the worker
! // threads operating on the global mark stack in parallel - potentially
! // interfering with each other.
  
  class G1CMParKeepAliveAndDrainClosure: public OopClosure {
    ConcurrentMark*  _cm;
    CMTask*          _task;
    int              _ref_counter_limit;
    int              _ref_counter;
+   bool             _is_par;
+ 
   public:
!   G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task, bool is_par) :
!     _cm(cm), _task(task), _is_par(is_par),
!     _ref_counter_limit(G1RefProcDrainInterval) {
      assert(_ref_counter_limit > 0, "sanity");
      _ref_counter = _ref_counter_limit;
+     assert(_task->worker_id() == 0 || _is_par, "sanity");
    }
  
    virtual void do_oop(narrowOop* p) { do_oop_work(p); }
    virtual void do_oop(      oop* p) { do_oop_work(p); }
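To illustrate the pattern this hunk introduces - one 'keep alive' closure serving both serial and parallel reference processing - here is a minimal standalone sketch. It is not HotSpot code: CMTaskStub and KeepAliveAndDrainSketch are hypothetical stand-ins for CMTask and the closure. The serial caller hands in the worker-0 task with is_par set to false, and the constructor assertion rejects any other worker id in serial mode.

// Standalone sketch of the unified serial/parallel closure pattern.
// CMTaskStub and KeepAliveAndDrainSketch are illustrative stand-ins.
#include <cassert>
#include <cstdio>

struct CMTaskStub {
  unsigned _worker_id;
  unsigned worker_id() const { return _worker_id; }
};

class KeepAliveAndDrainSketch {
  CMTaskStub* _task;
  bool        _is_par;
 public:
  KeepAliveAndDrainSketch(CMTaskStub* task, bool is_par)
      : _task(task), _is_par(is_par) {
    // Serial reference processing must run on the worker-0 task;
    // any other worker id is only legal in the parallel case.
    assert(_task->worker_id() == 0 || _is_par);
  }
  void do_oop() {
    std::printf("[%u] keep-alive via %s path\n",
                _task->worker_id(), _is_par ? "parallel" : "serial");
  }
};

int main() {
  CMTaskStub t0{0};
  KeepAliveAndDrainSketch serial(&t0, false);   // serial: worker 0, is_par = false
  serial.do_oop();
  CMTaskStub t3{3};
  KeepAliveAndDrainSketch parallel(&t3, true);  // parallel: any worker, is_par = true
  parallel.do_oop();
  return 0;
}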
*** 2288,2305 ****
        }
      }
    }
  };
  
  class G1CMParDrainMarkingStackClosure: public VoidClosure {
    ConcurrentMark* _cm;
    CMTask*         _task;
   public:
!   G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) :
!     _cm(cm), _task(task) { }
  
    void do_void() {
      do {
        if (_cm->verbose_high()) {
          gclog_or_tty->print_cr("\t[%u] Drain: Calling do marking_step",
                                 _task->worker_id());
        }
--- 2243,2278 ----
        }
      }
    }
  };
  
+ // 'Drain' oop closure used by both serial and parallel reference processing.
+ // Uses the CMTask associated with a given worker thread (for serial
+ // reference processing the CMTask for worker 0 is used). Calls the
+ // do_marking_step routine, with an unbelievably large timeout value,
+ // to drain the marking data structures of the remaining entries
+ // added by the 'keep alive' oop closure above.
+ 
  class G1CMParDrainMarkingStackClosure: public VoidClosure {
    ConcurrentMark* _cm;
    CMTask*         _task;
+   bool            _is_par;
   public:
!   G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task, bool is_par) :
!     _cm(cm), _task(task), _is_par(is_par) {
!     assert(_task->worker_id() == 0 || _is_par, "sanity");
!   }
  
    void do_void() {
+     if (!_is_par) {
+       // We need to set the phase so that the termination protocol in
+       // do_marking_step only waits for a single thread. The parallel
+       // version of this closure has the phase set for it by the
+       // RefProcTask executor instance.
+       _cm->set_phase(1, false /* concurrent */);
+     }
+ 
      do {
        if (_cm->verbose_high()) {
          gclog_or_tty->print_cr("\t[%u] Drain: Calling do marking_step",
                                 _task->worker_id());
        }
--- 2243,2278 ----
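The added guard in do_void() is the subtle part of this hunk: the termination protocol in do_marking_step waits for however many tasks the phase declares active, so the serial caller must declare exactly one before draining, while parallel callers have the phase set by the executor. A minimal sketch of that idea, with PhaseStub and DrainSketch as hypothetical stand-ins for ConcurrentMark and the closure:

// Sketch: the serial drain path must set the phase itself, or the
// termination protocol would wait for workers that never arrive.
#include <cstdio>

struct PhaseStub {
  unsigned active_tasks = 0;
  void set_phase(unsigned n, bool concurrent) {
    active_tasks = n;
    std::printf("termination will wait for %u task(s), concurrent=%d\n",
                n, (int)concurrent);
  }
};

class DrainSketch {
  PhaseStub* _cm;
  bool       _is_par;
 public:
  DrainSketch(PhaseStub* cm, bool is_par) : _cm(cm), _is_par(is_par) {}
  void do_void() {
    if (!_is_par) {
      // Parallel callers get the phase set by the executor; the serial
      // caller declares a single active task before draining.
      _cm->set_phase(1, false /* concurrent */);
    }
    // ... the drain loop calling do_marking_step() would follow here ...
  }
};

int main() {
  PhaseStub cm;
  DrainSketch(&cm, false /* is_par */).do_void();  // serial path
  return 0;
}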
*** 2361,2372 ****
      _proc_task(proc_task), _g1h(g1h), _cm(cm) { }
  
    virtual void work(uint worker_id) {
      CMTask* marking_task = _cm->task(worker_id);
      G1CMIsAliveClosure g1_is_alive(_g1h);
!     G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
!     G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
  
      _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
    }
  };
  
--- 2334,2345 ----
      _proc_task(proc_task), _g1h(g1h), _cm(cm) { }
  
    virtual void work(uint worker_id) {
      CMTask* marking_task = _cm->task(worker_id);
      G1CMIsAliveClosure g1_is_alive(_g1h);
!     G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, true /* is_par */);
!     G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task, true /* is_par */);
  
      _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
    }
  };
  
*** 2427,2489 ****
    ReferenceProcessor* rp = g1h->ref_processor_cm();
  
    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.
  
!   // Process weak references.
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");
  
!   G1CMKeepAliveClosure g1_keep_alive(g1h, this);
!   G1CMDrainMarkingStackClosure
!     g1_drain_mark_stack(this, &_markStack, &g1_keep_alive);
  
    // We use the work gang from the G1CollectedHeap and we utilize all
    // the worker threads.
    uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U;
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
  
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
  
    if (rp->processing_is_mt()) {
      // Set the degree of MT here. If the discovery is done MT, there
      // may have been a different number of threads doing the discovery
      // and a different number of discovered lists may have Ref objects.
      // That is OK as long as the Reference lists are balanced (see
      // balance_all_queues() and balance_queues()).
      rp->set_active_mt_degree(active_workers);
  
      rp->process_discovered_references(&g1_is_alive,
                                        &g1_keep_alive,
                                        &g1_drain_mark_stack,
!                                       &par_task_executor);
! 
!     // The work routines of the parallel keep_alive and drain_marking_stack
!     // will set the has_overflown flag if we overflow the global marking
!     // stack.
!   } else {
!     rp->process_discovered_references(&g1_is_alive,
!                                       &g1_keep_alive,
!                                       &g1_drain_mark_stack,
!                                       NULL);
!   }
  
    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");
    if (_markStack.overflow()) {
!     // Should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }
  
!   if (rp->processing_is_mt()) {
!     assert(rp->num_q() == active_workers, "why not");
!     rp->enqueue_discovered_references(&par_task_executor);
!   } else {
!     rp->enqueue_discovered_references();
!   }
  
    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }
--- 2400,2457 ----
    ReferenceProcessor* rp = g1h->ref_processor_cm();
  
    // See the comment in G1CollectedHeap::ref_processing_init()
    // about how reference processing currently works in G1.
  
!   // Set the soft reference policy.
    rp->setup_policy(clear_all_soft_refs);
    assert(_markStack.isEmpty(), "mark stack should be empty");
  
!   // Non-MT 'Keep Alive' and 'Complete GC' oop closures.
!   G1CMParKeepAliveAndDrainClosure g1_keep_alive(this, task(0), false /* is_par */);
!   G1CMParDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), false /* is_par */);
  
    // We use the work gang from the G1CollectedHeap and we utilize all
    // the worker threads.
    uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U;
    active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
  
    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                              g1h->workers(), active_workers);
  
+   // Process the weak references.
    if (rp->processing_is_mt()) {
      // Set the degree of MT here. If the discovery is done MT, there
      // may have been a different number of threads doing the discovery
      // and a different number of discovered lists may have Ref objects.
      // That is OK as long as the Reference lists are balanced (see
      // balance_all_queues() and balance_queues()).
      rp->set_active_mt_degree(active_workers);
+   }
  
    rp->process_discovered_references(&g1_is_alive,
                                      &g1_keep_alive,
                                      &g1_drain_mark_stack,
!                                     (rp->processing_is_mt() ? &par_task_executor
!                                                             : NULL));
  
!   // The do_oop work routines of the keep_alive and drain_marking_stack
!   // oop closures will set the has_overflown flag if we overflow the
!   // global marking stack.
  
    assert(_markStack.overflow() || _markStack.isEmpty(),
           "mark stack should be empty (unless it overflowed)");
    if (_markStack.overflow()) {
!     // This should have been done already when we tried to push an
      // entry on to the global mark stack. But let's do it again.
      set_has_overflown();
    }
  
!   assert(!rp->processing_is_mt() || rp->num_q() == active_workers, "why not");
! 
!   rp->enqueue_discovered_references((rp->processing_is_mt() ? &par_task_executor
!                                                             : NULL));
  
    rp->verify_no_references_recorded();
    assert(!rp->discovery_enabled(), "Post condition");
  }
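Both call sites in this hunk now select the executor with a single conditional expression instead of duplicating the call in an if/else. A small sketch of that pattern, with process_refs and ExecutorStub as hypothetical stand-ins for the ReferenceProcessor API used by the patch:

// Sketch: collapsing an if/else pair into one call that passes either
// a parallel task executor or NULL (here, nullptr).
#include <cstdio>

struct ExecutorStub { const char* name; };

void process_refs(ExecutorStub* executor) {
  if (executor != nullptr) {
    std::printf("multi-threaded processing via %s\n", executor->name);
  } else {
    std::printf("single-threaded processing\n");
  }
}

int main() {
  bool processing_is_mt = true;
  ExecutorStub par_task_executor{"par_task_executor"};
  // One call site for both modes, mirroring the patch's
  // (rp->processing_is_mt() ? &par_task_executor : NULL).
  process_refs(processing_is_mt ? &par_task_executor : nullptr);
  process_refs(nullptr);  // serial path
  return 0;
}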