1669 // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1670 // may race against OopStorage::release() calls.
1671 ShenandoahEvacUpdateOopStorageRootsClosure cl;
1672 _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id);
1673 }
1674
1675 {
1676 ShenandoahEvacuateUpdateRootsClosure<> cl;
1677 CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1678 _cld_roots.cld_do(&clds, worker_id);
1679 }
1680 }
1681 };
1682
// Cleans up (NULLs out) dead weak roots held in OopStorage slots while
// evacuating/updating the live ones. Counts the dead oops it clears so the
// caller can report them to the owning table (e.g. StringTable,
// ResolvedMethodTable) via dead_counter()/reset_dead_counter().
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;     // snapshot of evacuation state taken at construction
  Thread* const _thread;      // the worker thread that constructed/runs this closure
  size_t _dead_counter;       // dead (unmarked) roots cleared since last reset

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  // Roots processed here are expected to be uncompressed; must not be reached.
  void do_oop(narrowOop* p);

  size_t dead_counter() const;  // dead oops cleared since the last reset
  void reset_dead_counter();    // restart per-table dead-oop accounting
};
1699
// Snapshot heap, marking context and evacuation state once at construction:
// the closure runs within a single concurrent phase, so these are stable for
// its lifetime. Dead counter starts at zero.
ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()),
  _dead_counter(0) {
}
1707
1708 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
1709 const oop obj = RawAccess<>::oop_load(p);
1710 if (!CompressedOops::is_null(obj)) {
1711 if (!_mark_context->is_marked(obj)) {
1712 shenandoah_assert_correct(p, obj);
1713 oop old = Atomic::cmpxchg(p, obj, oop(NULL));
1714 if (obj == old) {
1715 _dead_counter ++;
1716 }
1717 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
1718 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1719 if (resolved == obj) {
1720 resolved = _heap->evacuate_object(obj, _thread);
1721 }
1722 Atomic::cmpxchg(p, obj, resolved);
1723 assert(_heap->cancelled_gc() ||
1724 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
1725 "Sanity");
1726 }
1727 }
1728 }
1729
// Roots handled by this closure are expected to be uncompressed oops;
// reaching the narrowOop overload indicates a bug.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}
1733
// Number of dead weak roots this closure has cleared since the last reset.
size_t ShenandoahEvacUpdateCleanupOopStorageRootsClosure::dead_counter() const {
  return _dead_counter;
}
1737
// Restart dead-oop accounting, typically between per-table iterations so each
// table receives only its own dead count.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::reset_dead_counter() {
  _dead_counter = 0;
}
1741
1742 class ShenandoahIsCLDAliveClosure : public CLDClosure {
1743 public:
1744 void do_cld(ClassLoaderData* cld) {
1745 cld->is_alive();
1746 }
1747 };
1748
1749 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
1750 public:
1751 void do_nmethod(nmethod* n) {
1752 n->is_unloading();
1753 }
1754 };
1755
1756 // This task not only evacuates/updates marked weak roots, but also "NULL"s
1757 // out dead weak roots.
1758 class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
1759 private:
1760 ShenandoahWeakRoot<true /*concurrent*/> _jni_roots;
1761 ShenandoahWeakRoot<true /*concurrent*/> _string_table_roots;
1762 ShenandoahWeakRoot<true /*concurrent*/> _resolved_method_table_roots;
1763 ShenandoahWeakRoot<true /*concurrent*/> _vm_roots;
1764
1765 // Roots related to concurrent class unloading
1766 ShenandoahClassLoaderDataRoots<true /* concurrent */, false /* single thread*/>
1767 _cld_roots;
1768 ShenandoahConcurrentNMethodIterator _nmethod_itr;
1769 ShenandoahConcurrentStringDedupRoots _dedup_roots;
1770 bool _concurrent_class_unloading;
1771
1772 public:
  // Resets the global dead-oop counters (workers accumulate into them during
  // the task; the destructor finishes them) and, when class unloading is
  // performed concurrently, opens the nmethod iteration under CodeCache_lock.
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Concurrent Weak Root Task"),
    _jni_roots(OopStorageSet::jni_weak(), phase, ShenandoahPhaseTimings::JNIWeakRoots),
    _string_table_roots(OopStorageSet::string_table_weak(), phase, ShenandoahPhaseTimings::StringTableRoots),
    _resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), phase, ShenandoahPhaseTimings::ResolvedMethodTableRoots),
    _vm_roots(OopStorageSet::vm_weak(), phase, ShenandoahPhaseTimings::VMWeakRoots),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _dedup_roots(phase),
    _concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
    StringTable::reset_dead_counter();
    ResolvedMethodTable::reset_dead_counter();
    if (_concurrent_class_unloading) {
      // nmethods_do_begin()/_end() must bracket the iteration, both taken
      // under CodeCache_lock without safepoint checks.
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }
1790
  // Publishes the accumulated dead counts to the owning tables and closes the
  // nmethod iteration opened in the constructor.
  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    StringTable::finish_dead_counter();
    ResolvedMethodTable::finish_dead_counter();
    if (_concurrent_class_unloading) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
  }
1799
1800 void work(uint worker_id) {
1801 ShenandoahConcurrentWorkerSession worker_session(worker_id);
1802 {
1803 ShenandoahEvacOOMScope oom;
1804 // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
1805 // may race against OopStorage::release() calls.
1806 ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
1807 _jni_roots.oops_do(&cl, worker_id);
1808 _vm_roots.oops_do(&cl, worker_id);
1809
1810 cl.reset_dead_counter();
1811 _string_table_roots.oops_do(&cl, worker_id);
1812 StringTable::inc_dead_counter(cl.dead_counter());
1813
1814 cl.reset_dead_counter();
1815 _resolved_method_table_roots.oops_do(&cl, worker_id);
1816 ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
1817
1818 // String dedup weak roots
1819 ShenandoahForwardedIsAliveClosure is_alive;
1820 ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
1821 _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
1822 }
1823
1824     // If we are going to perform concurrent class unloading later on, we need to
1825     // clean up the weak oops in CLDs and determine each nmethod's unloading state,
1826     // so that we can clean up immediate garbage sooner.
1827 if (_concurrent_class_unloading) {
1828 // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either NULL the
1829 // CLD's holder or evacuate it.
1830 ShenandoahIsCLDAliveClosure is_cld_alive;
1831 _cld_roots.cld_do(&is_cld_alive, worker_id);
1832
1833 // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
1834 // The closure calls nmethod->is_unloading(). The is_unloading
1835 // state is cached, therefore, during concurrent class unloading phase,
1836 // we will not touch the metadata of unloading nmethods
1837 ShenandoahIsNMethodAliveClosure is_nmethod_alive;
|
1669 // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
1670 // may race against OopStorage::release() calls.
1671 ShenandoahEvacUpdateOopStorageRootsClosure cl;
1672 _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id);
1673 }
1674
1675 {
1676 ShenandoahEvacuateUpdateRootsClosure<> cl;
1677 CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
1678 _cld_roots.cld_do(&clds, worker_id);
1679 }
1680 }
1681 };
1682
// Cleans up (NULLs out) dead weak roots held in OopStorage slots while
// evacuating/updating the live ones.
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;   // snapshot of evacuation state taken at construction
  Thread* const _thread;    // the worker thread that constructed/runs this closure

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  // Roots processed here are expected to be uncompressed; must not be reached.
  void do_oop(narrowOop* p);
};
1695
// Snapshot heap, marking context and evacuation state once at construction:
// the closure runs within a single concurrent phase, so these are stable for
// its lifetime.
ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
  _heap(ShenandoahHeap::heap()),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
  _thread(Thread::current()) {
}
1702
1703 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
1704 const oop obj = RawAccess<>::oop_load(p);
1705 if (!CompressedOops::is_null(obj)) {
1706 if (!_mark_context->is_marked(obj)) {
1707 shenandoah_assert_correct(p, obj);
1708 Atomic::cmpxchg(p, obj, oop(NULL));
1709 } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
1710 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1711 if (resolved == obj) {
1712 resolved = _heap->evacuate_object(obj, _thread);
1713 }
1714 Atomic::cmpxchg(p, obj, resolved);
1715 assert(_heap->cancelled_gc() ||
1716 _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
1717 "Sanity");
1718 }
1719 }
1720 }
1721
// Roots handled by this closure are expected to be uncompressed oops;
// reaching the narrowOop overload indicates a bug.
void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
  ShouldNotReachHere();
}
1725
1726 class ShenandoahIsCLDAliveClosure : public CLDClosure {
1727 public:
1728 void do_cld(ClassLoaderData* cld) {
1729 cld->is_alive();
1730 }
1731 };
1732
1733 class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
1734 public:
1735 void do_nmethod(nmethod* n) {
1736 n->is_unloading();
1737 }
1738 };
1739
1740 // This task not only evacuates/updates marked weak roots, but also "NULL"s
1741 // out dead weak roots.
1742 class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
1743 private:
1744 ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;
1745
1746 // Roots related to concurrent class unloading
1747 ShenandoahClassLoaderDataRoots<true /* concurrent */, false /* single thread*/>
1748 _cld_roots;
1749 ShenandoahConcurrentNMethodIterator _nmethod_itr;
1750 ShenandoahConcurrentStringDedupRoots _dedup_roots;
1751 bool _concurrent_class_unloading;
1752
1753 public:
  // When class unloading is performed concurrently, opens the nmethod
  // iteration under CodeCache_lock; the destructor closes it.
  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Concurrent Weak Root Task"),
    _vm_roots(phase),
    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
    _nmethod_itr(ShenandoahCodeRoots::table()),
    _dedup_roots(phase),
    _concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
    if (_concurrent_class_unloading) {
      // nmethods_do_begin()/_end() must bracket the iteration, both taken
      // under CodeCache_lock without safepoint checks.
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_begin();
    }
  }
1766
  // Closes the nmethod iteration opened in the constructor, then notifies
  // the runtime weak-root owners of potentially dead oops.
  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
    if (_concurrent_class_unloading) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      _nmethod_itr.nmethods_do_end();
    }
    // Notify runtime data structures of potentially dead oops
    _vm_roots.notify();
  }
1775
1776 void work(uint worker_id) {
1777 ShenandoahConcurrentWorkerSession worker_session(worker_id);
1778 {
1779 ShenandoahEvacOOMScope oom;
1780 // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
1781 // may race against OopStorage::release() calls.
1782 ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
1783 _vm_roots.oops_do(&cl, worker_id);
1784
1785 // String dedup weak roots
1786 ShenandoahForwardedIsAliveClosure is_alive;
1787 ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
1788 _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
1789 }
1790
1791     // If we are going to perform concurrent class unloading later on, we need to
1792     // clean up the weak oops in CLDs and determine each nmethod's unloading state,
1793     // so that we can clean up immediate garbage sooner.
1794 if (_concurrent_class_unloading) {
1795 // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either NULL the
1796 // CLD's holder or evacuate it.
1797 ShenandoahIsCLDAliveClosure is_cld_alive;
1798 _cld_roots.cld_do(&is_cld_alive, worker_id);
1799
1800 // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
1801 // The closure calls nmethod->is_unloading(). The is_unloading
1802 // state is cached, therefore, during concurrent class unloading phase,
1803 // we will not touch the metadata of unloading nmethods
1804 ShenandoahIsNMethodAliveClosure is_nmethod_alive;
|