// NOTE(review): this region is the tail of a larger function (its opening
// brace and the declarations of rp, g1h, g1_is_alive, g1_keep_alive and
// g1_drain_mark_stack live outside this view) — only comments are added here.
1701 // we utilize all the worker threads we can.
// Discovery may have been MT even when processing is not; fall back to a
// single worker when the reference processor is not processing in parallel.
1702 bool processing_is_mt = rp->processing_is_mt();
1703 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
// Clamp to [1, _max_worker_id] so we never use more workers than we have queues.
1704 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1705
1706 // Parallel processing task executor.
1707 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1708 g1h->workers(), active_workers);
// A NULL executor tells the reference processor to run the phases serially.
1709 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1710
1711 // Set the concurrency level. The phase was already set prior to
1712 // executing the remark task.
1713 set_concurrency(active_workers);
1714
1715 // Set the degree of MT processing here. If the discovery was done MT,
1716 // the number of threads involved during discovery could differ from
1717 // the number of active workers. This is OK as long as the discovered
1718 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1719 rp->set_active_mt_degree(active_workers);
1720
1721 // Process the weak references.
1722 const ReferenceProcessorStats& stats =
1723 rp->process_discovered_references(&g1_is_alive,
1724 &g1_keep_alive,
1725 &g1_drain_mark_stack,
1726 executor,
1727 _gc_timer_cm);
1728 _gc_tracer_cm->report_gc_reference_stats(stats);
1729
1730 // The do_oop work routines of the keep_alive and drain_marking_stack
1731 // oop closures will set the has_overflown flag if we overflow the
1732 // global marking stack.
1733
1734 assert(has_overflown() || _global_mark_stack.is_empty(),
1735 "Mark stack should be empty (unless it has overflown)");
1736
// The queue count must match the MT degree configured above; otherwise the
// enqueue below could operate on a different number of discovered lists.
1737 assert(rp->num_q() == active_workers, "why not");
1738
1739 rp->enqueue_discovered_references(executor);
1740
// Discovery must be fully drained and disabled once processing completes.
1741 rp->verify_no_references_recorded();
1742 assert(!rp->discovery_enabled(), "Post condition");
1743 }
1744
// If the global mark stack overflowed, marking is incomplete and g1_is_alive
// may give false answers, so bail out before any cleaning/unloading.
1745 if (has_overflown()) {
1746 // We can not trust g1_is_alive if the marking stack overflowed
1747 return;
1748 }
1749
1750 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1751
1752 // Unload Klasses, String, Symbols, Code Cache, etc.
1753 if (ClassUnloadingWithConcurrentMark) {
1754 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1755 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1756 g1h->complete_cleaning(&g1_is_alive, purged_classes);
1757 } else {
1758 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1759 // No need to clean string table and symbol table as they are treated as strong roots when
1760 // class unloading is disabled.
1761 g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
// ---- second copy of the same region follows (updated revision that adds ReferenceProcessorPhaseTimes) ----
// NOTE(review): second revision of the same region — presumably the updated
// version after ReferenceProcessorPhaseTimes was introduced (adds per-phase
// timing via `pt`); the enclosing function's head/tail are outside this view.
// Only comments are added here.
1701 // we utilize all the worker threads we can.
// Discovery may have been MT even when processing is not; fall back to a
// single worker when the reference processor is not processing in parallel.
1702 bool processing_is_mt = rp->processing_is_mt();
1703 uint active_workers = (processing_is_mt ? g1h->workers()->active_workers() : 1U);
// Clamp to [1, _max_worker_id] so we never use more workers than we have queues.
1704 active_workers = MAX2(MIN2(active_workers, _max_worker_id), 1U);
1705
1706 // Parallel processing task executor.
1707 G1CMRefProcTaskExecutor par_task_executor(g1h, this,
1708 g1h->workers(), active_workers);
// A NULL executor tells the reference processor to run the phases serially.
1709 AbstractRefProcTaskExecutor* executor = (processing_is_mt ? &par_task_executor : NULL);
1710
1711 // Set the concurrency level. The phase was already set prior to
1712 // executing the remark task.
1713 set_concurrency(active_workers);
1714
1715 // Set the degree of MT processing here. If the discovery was done MT,
1716 // the number of threads involved during discovery could differ from
1717 // the number of active workers. This is OK as long as the discovered
1718 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1719 rp->set_active_mt_degree(active_workers);
1720
// Per-phase reference-processing timing, sized by the number of discovered
// reference queues; filled in by process/enqueue below and printed afterwards.
1721 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
1722
1723 // Process the weak references.
1724 const ReferenceProcessorStats& stats =
1725 rp->process_discovered_references(&g1_is_alive,
1726 &g1_keep_alive,
1727 &g1_drain_mark_stack,
1728 executor,
1729 &pt);
1730 _gc_tracer_cm->report_gc_reference_stats(stats);
1731 pt.print_all_references();
1732
1733 // The do_oop work routines of the keep_alive and drain_marking_stack
1734 // oop closures will set the has_overflown flag if we overflow the
1735 // global marking stack.
1736
1737 assert(has_overflown() || _global_mark_stack.is_empty(),
1738 "Mark stack should be empty (unless it has overflown)");
1739
// The queue count must match the MT degree configured above; otherwise the
// enqueue below could operate on a different number of discovered lists.
1740 assert(rp->num_q() == active_workers, "why not");
1741
1742 rp->enqueue_discovered_references(executor, &pt);
1743
// Discovery must be fully drained and disabled once processing completes.
1744 rp->verify_no_references_recorded();
1745
1746 pt.print_enqueue_phase();
1747
1748 assert(!rp->discovery_enabled(), "Post condition");
1749 }
1750
// If the global mark stack overflowed, marking is incomplete and g1_is_alive
// may give false answers, so bail out before any cleaning/unloading.
1751 if (has_overflown()) {
1752 // We can not trust g1_is_alive if the marking stack overflowed
1753 return;
1754 }
1755
1756 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1757
1758 // Unload Klasses, String, Symbols, Code Cache, etc.
1759 if (ClassUnloadingWithConcurrentMark) {
1760 GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
1761 bool purged_classes = SystemDictionary::do_unloading(&g1_is_alive, _gc_timer_cm, false /* Defer cleaning */);
1762 g1h->complete_cleaning(&g1_is_alive, purged_classes);
1763 } else {
1764 GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
1765 // No need to clean string table and symbol table as they are treated as strong roots when
1766 // class unloading is disabled.
1767 g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
|