
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

rev 49831 : imported patch 8201492-properly-implement-non-contiguous-reference-processing
rev 49834 : [mq]: 8202021-cleanup-referenceprocessor

----- old version -----
1639     // we utilize all the worker threads we can.
1640     bool processing_is_mt = rp->processing_is_mt();
1641     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1642     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1643 
1644     // Parallel processing task executor.
1645     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1646                                               _g1h->workers(), active_workers);
1647     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1648 
1649     // Set the concurrency level. The phase was already set prior to
1650     // executing the remark task.
1651     set_concurrency(active_workers);
1652 
1653     // Set the degree of MT processing here.  If the discovery was done MT,
1654     // the number of threads involved during discovery could differ from
1655     // the number of active workers.  This is OK as long as the discovered
1656     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1657     rp->set_active_mt_degree(active_workers);
1658 
1659     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
1660 
1661     // Process the weak references.
1662     const ReferenceProcessorStats& stats =
1663         rp->process_discovered_references(&g1_is_alive,
1664                                           &g1_keep_alive,
1665                                           &g1_drain_mark_stack,
1666                                           executor,
1667                                           &pt);
1668     _gc_tracer_cm->report_gc_reference_stats(stats);
1669     pt.print_all_references();
1670 
1671     // The do_oop work routines of the keep_alive and drain_marking_stack
1672     // oop closures will set the has_overflown flag if we overflow the
1673     // global marking stack.
1674 
1675     assert(has_overflown() || _global_mark_stack.is_empty(),
1676            "Mark stack should be empty (unless it has overflown)");
1677 
1678     assert(rp->num_q() == active_workers, "why not");
1679 
1680     rp->enqueue_discovered_references(executor, &pt);
1681 
1682     rp->verify_no_references_recorded();
1683 
1684     pt.print_enqueue_phase();
1685 
1686     assert(!rp->discovery_enabled(), "Post condition");
1687   }
1688 
1689   assert(has_overflown() || _global_mark_stack.is_empty(),
1690          "Mark stack should be empty (unless it has overflown)");
1691 
1692   {
1693     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1694     WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl);
1695   }
1696 
1697   if (has_overflown()) {
1698     // We can not trust g1_is_alive if the marking stack overflowed

----- new version -----

1639     // we utilize all the worker threads we can.
1640     bool processing_is_mt = rp->processing_is_mt();
1641     uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
1642     active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
1643 
1644     // Parallel processing task executor.
1645     G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
1646                                               _g1h->workers(), active_workers);
1647     AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1648 
1649     // Set the concurrency level. The phase was already set prior to
1650     // executing the remark task.
1651     set_concurrency(active_workers);
1652 
1653     // Set the degree of MT processing here.  If the discovery was done MT,
1654     // the number of threads involved during discovery could differ from
1655     // the number of active workers.  This is OK as long as the discovered
1656     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1657     rp->set_active_mt_degree(active_workers);
1658 
1659     ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
1660 
1661     // Process the weak references.
1662     const ReferenceProcessorStats& stats =
1663         rp->process_discovered_references(&g1_is_alive,
1664                                           &g1_keep_alive,
1665                                           &g1_drain_mark_stack,
1666                                           executor,
1667                                           &pt);
1668     _gc_tracer_cm->report_gc_reference_stats(stats);
1669     pt.print_all_references();
1670 
1671     // The do_oop work routines of the keep_alive and drain_marking_stack
1672     // oop closures will set the has_overflown flag if we overflow the
1673     // global marking stack.
1674 
1675     assert(has_overflown() || _global_mark_stack.is_empty(),
1676            "Mark stack should be empty (unless it has overflown)");
1677 
1678     assert(rp->num_queues() == active_workers, "why not");
1679 
1680     rp->enqueue_discovered_references(executor, &pt);
1681 
1682     rp->verify_no_references_recorded();
1683 
1684     pt.print_enqueue_phase();
1685 
1686     assert(!rp->discovery_enabled(), "Post condition");
1687   }
1688 
1689   assert(has_overflown() || _global_mark_stack.is_empty(),
1690          "Mark stack should be empty (unless it has overflown)");
1691 
1692   {
1693     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
1694     WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl);
1695   }
1696 
1697   if (has_overflown()) {
1698     // We can not trust g1_is_alive if the marking stack overflowed
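
For context: the only difference between the two versions of this hunk is the ReferenceProcessor accessor rename from num_q() to num_queues() at lines 1659 and 1678, part of the 8202021 ReferenceProcessor cleanup; the surrounding logic is unchanged.

The clamp at lines 1641-1642 bounds the reference-processing thread count to the range [1, _max_num_tasks]. Below is a minimal stand-alone sketch of that arithmetic, with std::min/std::max standing in for HotSpot's MIN2/MAX2 macros (illustrative code, not HotSpot source):

  #include <algorithm>
  #include <cassert>

  // Mirrors MAX2(MIN2(active_workers, _max_num_tasks), 1U): cap the worker
  // count at the number of marking tasks, but never drop below one thread.
  static unsigned clamp_workers(unsigned active_workers, unsigned max_num_tasks) {
    return std::max(std::min(active_workers, max_num_tasks), 1u);
  }

  int main() {
    assert(clamp_workers(8, 4) == 4);  // capped by the task limit
    assert(clamp_workers(0, 4) == 1);  // never below one worker
    assert(clamp_workers(3, 4) == 3);  // already in range: unchanged
    return 0;
  }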

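The comment at lines 1653-1656 notes that the discovery-time thread count may differ from active_workers, which is safe only if the discovered Reference lists are balanced across the rp->num_queues() queues that the assert at line 1678 ties to active_workers. The toy function below shows what "balanced" means here, round-robining items onto one queue per worker; it is an illustrative stand-in only, since the real balance_queues() splices linked lists of discovered References rather than copying elements:

  #include <cstddef>
  #include <vector>

  // Illustrative only: spread the contents of however many queues discovery
  // used across num_workers processing queues so each worker gets a
  // near-equal share (queue sizes end up differing by at most one).
  // Precondition: num_workers >= 1, as the clamp above guarantees.
  template <typename T>
  std::vector<std::vector<T>> balance(const std::vector<std::vector<T>>& in,
                                      std::size_t num_workers) {
    std::vector<std::vector<T>> out(num_workers);
    std::size_t i = 0;
    for (const auto& queue : in) {
      for (const auto& item : queue) {
        out[i++ % num_workers].push_back(item);  // round-robin redistribution
      }
    }
    return out;
  }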

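Lines 1671-1676 and 1697-1698 describe the overflow protocol: the keep-alive and drain closures do not fail when the global mark stack fills up; they set a sticky has_overflown flag, and callers must consult it before trusting either mark-stack emptiness or g1_is_alive. A hypothetical sketch of that protocol follows (the class name and flat-array layout are invented for illustration; G1's actual G1CMMarkStack is a chunked structure):

  #include <cstddef>

  // Hypothetical fixed-capacity mark stack: push() records overflow in a
  // sticky flag instead of aborting, so the current marking pass can finish
  // and the caller can decide to restart marking afterwards.
  class MarkStackSketch {
    void**      _base;
    std::size_t _capacity;
    std::size_t _top;
    bool        _overflown;
  public:
    MarkStackSketch(void** base, std::size_t capacity)
      : _base(base), _capacity(capacity), _top(0), _overflown(false) {}

    void push(void* obj) {
      if (_top == _capacity) { _overflown = true; return; }  // flag; a restart redoes the work
      _base[_top++] = obj;
    }
    bool has_overflown() const { return _overflown; }
    bool is_empty()      const { return _top == 0; }
  };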