1693 if (_c->_foregroundGCIsActive) {
1694 CGC_lock->notify();
1695 }
1696 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1697 "Possible deadlock");
1698 }
1699
// Destructor: re-establishes the blocking protocol for the foreground
// collector. _foregroundGCShouldWait is expected to be false on entry
// (asserted) and is set back to true under CGC_lock, so a foreground
// collection arriving later will again wait for the background collector.
1700 ~ReleaseForegroundGC() {
1701 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
// Flag is only mutated while holding CGC_lock (no safepoint check:
// this runs on a GC thread).
1702 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1703 _c->_foregroundGCShouldWait = true;
1704 }
1705 };
1706
// Drive an asynchronous (background) CMS collection cycle for `cause`.
// Runs only on the CMS concurrent thread (asserted below). The visible
// portion decides whether to start a cycle — atomically w.r.t. the
// foreground collector, under Heap_lock + free lists + CGC_lock — and
// then steps the _collectorState machine (Sweeping/Resizing/Resetting).
// NOTE(review): this excerpt is truncated; original lines 1737-1839 are
// not shown here.
1707 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1708 assert(Thread::current()->is_ConcurrentGC_thread(),
1709 "A CMS asynchronous collection is only allowed on a CMS thread.");
1710
1711 CMSHeap* heap = CMSHeap::heap();
1712 {
// All three locks taken without safepoint checks so the start-of-cycle
// decision cannot interleave with a foreground collection.
1713 bool safepoint_check = Mutex::_no_safepoint_check_flag;
1714 MutexLockerEx hl(Heap_lock, safepoint_check);
1715 FreelistLocker fll(this);
1716 MutexLockerEx x(CGC_lock, safepoint_check);
1717 if (_foregroundGCIsActive) {
1718 // The foreground collector is active. Skip this
1719 // background collection.
1720 assert(!_foregroundGCShouldWait, "Should be clear");
1721 return;
1722 } else {
1723 assert(_collectorState == Idling, "Should be idling before start.");
1724 _collectorState = InitialMarking;
1725 register_gc_start(cause);
1726 // Reset the expansion cause, now that we are about to begin
1727 // a new cycle.
1728 clear_expansion_cause();
1729
1730 // Clear the MetaspaceGC flag since a concurrent collection
1731 // is starting but also clear it after the collection.
1732 MetaspaceGC::set_should_concurrent_collect(false);
1733 }
1734 // Decide if we want to enable class unloading as part of the
1735 // ensuing concurrent GC cycle.
1736 update_should_unload_classes();
// [original lines 1737-1839 elided in this excerpt]
1840
// Final remark is executed as a VM operation at a safepoint.
1841 VM_CMS_Final_Remark final_remark_op(this);
1842 VMThread::execute(&final_remark_op);
1843 }
1844 assert(_foregroundGCShouldWait, "block post-condition");
1845 break;
1846 case Sweeping:
1847 // final marking in checkpointRootsFinal has been completed
1848 sweep();
1849 assert(_collectorState == Resizing, "Collector state change "
1850 "to Resizing must be done under the free_list_lock");
// Note: intentional fall-through into the Resizing case.
1851
1852 case Resizing: {
1853 // Sweeping has been completed...
1854 // At this point the background collection has completed.
1855 // Don't move the call to compute_new_size() down
1856 // into code that might be executed if the background
1857 // collection was preempted.
1858 {
1859 ReleaseForegroundGC x(this); // unblock FG collection
1860 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
1861 CMSTokenSync z(true); // not strictly needed.
1862 if (_collectorState == Resizing) {
1863 compute_new_size();
1864 save_heap_summary();
1865 _collectorState = Resetting;
1866 } else {
1867 assert(_collectorState == Idling, "The state should only change"
1868 " because the foreground collector has finished the collection");
1869 }
1870 }
1871 break;
1872 }
1873 case Resetting:
1874 // CMS heap resizing has been completed
1875 reset_concurrent();
1876 assert(_collectorState == Idling, "Collector state should "
1877 "have changed");
1878
// Clear the MetaspaceGC flag again now that the collection is over.
1879 MetaspaceGC::set_should_concurrent_collect(false);
1880
7941 // in such low resource situations.
7942 void CMSCollector::preserve_mark_work(oop p, markOop m) {
7943 _preserved_oop_stack.push(p);
7944 _preserved_mark_stack.push(m);
7945 assert(m == p->mark_raw(), "Mark word changed");
7946 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
7947 "bijection");
7948 }
7949
7950 // Single threaded
7951 void CMSCollector::preserve_mark_if_necessary(oop p) {
7952 markOop m = p->mark_raw();
7953 if (m->must_be_preserved(p)) {
7954 preserve_mark_work(p, m);
7955 }
7956 }
7957
7958 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
7959 markOop m = p->mark_raw();
7960 if (m->must_be_preserved(p)) {
7961 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
7962 // Even though we read the mark word without holding
7963 // the lock, we are assured that it will not change
7964 // because we "own" this oop, so no other thread can
7965 // be trying to push it on the overflow list; see
7966 // the assertion in preserve_mark_work() that checks
7967 // that m == p->mark_raw().
7968 preserve_mark_work(p, m);
7969 }
7970 }
7971
7972 // We should be able to do this multi-threaded,
7973 // a chunk of stack being a task (this is
7974 // correct because each oop only ever appears
7975 once in the overflow list). However, it's
7976 // not very easy to completely overlap this with
7977 // other operations, so will generally not be done
7978 // until all work's been completed. Because we
7979 // expect the preserved oop stack (set) to be small,
7980 // it's probably fine to do this single-threaded.
7981 // We can explore cleverer concurrent/overlapped/parallel
|
1693 if (_c->_foregroundGCIsActive) {
1694 CGC_lock->notify();
1695 }
1696 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1697 "Possible deadlock");
1698 }
1699
// Destructor: re-establishes the blocking protocol for the foreground
// collector. _foregroundGCShouldWait is expected to be false on entry
// (asserted) and is set back to true under CGC_lock, so a foreground
// collection arriving later will again wait for the background collector.
1700 ~ReleaseForegroundGC() {
1701 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
// Flag is only mutated while holding CGC_lock (no safepoint check:
// this runs on a GC thread).
1702 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1703 _c->_foregroundGCShouldWait = true;
1704 }
1705 };
1706
// Drive an asynchronous (background) CMS collection cycle for `cause`.
// Runs only on the CMS concurrent thread (asserted below). The visible
// portion decides whether to start a cycle — atomically w.r.t. the
// foreground collector, under Heap_lock + free lists + CGC_lock — and
// then steps the _collectorState machine (Sweeping/Resizing/Resetting).
// NOTE(review): this excerpt is truncated; original lines 1736-1838 are
// not shown here.
1707 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1708 assert(Thread::current()->is_ConcurrentGC_thread(),
1709 "A CMS asynchronous collection is only allowed on a CMS thread.");
1710
1711 CMSHeap* heap = CMSHeap::heap();
1712 {
// All three locks are taken so the start-of-cycle decision cannot
// interleave with a foreground collection.
1713 MutexLocker hl(Heap_lock);
1714 FreelistLocker fll(this);
1715 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1716 if (_foregroundGCIsActive) {
1717 // The foreground collector is active. Skip this
1718 // background collection.
1719 assert(!_foregroundGCShouldWait, "Should be clear");
1720 return;
1721 } else {
1722 assert(_collectorState == Idling, "Should be idling before start.");
1723 _collectorState = InitialMarking;
1724 register_gc_start(cause);
1725 // Reset the expansion cause, now that we are about to begin
1726 // a new cycle.
1727 clear_expansion_cause();
1728
1729 // Clear the MetaspaceGC flag since a concurrent collection
1730 // is starting but also clear it after the collection.
1731 MetaspaceGC::set_should_concurrent_collect(false);
1732 }
1733 // Decide if we want to enable class unloading as part of the
1734 // ensuing concurrent GC cycle.
1735 update_should_unload_classes();
// [original lines 1736-1838 elided in this excerpt]
1839
// Final remark is executed as a VM operation at a safepoint.
1840 VM_CMS_Final_Remark final_remark_op(this);
1841 VMThread::execute(&final_remark_op);
1842 }
1843 assert(_foregroundGCShouldWait, "block post-condition");
1844 break;
1845 case Sweeping:
1846 // final marking in checkpointRootsFinal has been completed
1847 sweep();
1848 assert(_collectorState == Resizing, "Collector state change "
1849 "to Resizing must be done under the free_list_lock");
// Note: intentional fall-through into the Resizing case.
1850
1851 case Resizing: {
1852 // Sweeping has been completed...
1853 // At this point the background collection has completed.
1854 // Don't move the call to compute_new_size() down
1855 // into code that might be executed if the background
1856 // collection was preempted.
1857 {
1858 ReleaseForegroundGC x(this); // unblock FG collection
1859 MutexLocker y(Heap_lock);
1860 CMSTokenSync z(true); // not strictly needed.
1861 if (_collectorState == Resizing) {
1862 compute_new_size();
1863 save_heap_summary();
1864 _collectorState = Resetting;
1865 } else {
1866 assert(_collectorState == Idling, "The state should only change"
1867 " because the foreground collector has finished the collection");
1868 }
1869 }
1870 break;
1871 }
1872 case Resetting:
1873 // CMS heap resizing has been completed
1874 reset_concurrent();
1875 assert(_collectorState == Idling, "Collector state should "
1876 "have changed");
1877
// Clear the MetaspaceGC flag again now that the collection is over.
1878 MetaspaceGC::set_should_concurrent_collect(false);
1879
7940 // in such low resource situations.
7941 void CMSCollector::preserve_mark_work(oop p, markOop m) {
7942 _preserved_oop_stack.push(p);
7943 _preserved_mark_stack.push(m);
7944 assert(m == p->mark_raw(), "Mark word changed");
7945 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
7946 "bijection");
7947 }
7948
7949 // Single threaded
7950 void CMSCollector::preserve_mark_if_necessary(oop p) {
7951 markOop m = p->mark_raw();
7952 if (m->must_be_preserved(p)) {
7953 preserve_mark_work(p, m);
7954 }
7955 }
7956
7957 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
7958 markOop m = p->mark_raw();
7959 if (m->must_be_preserved(p)) {
7960 MutexLocker x(ParGCRareEvent_lock);
7961 // Even though we read the mark word without holding
7962 // the lock, we are assured that it will not change
7963 // because we "own" this oop, so no other thread can
7964 // be trying to push it on the overflow list; see
7965 // the assertion in preserve_mark_work() that checks
7966 // that m == p->mark_raw().
7967 preserve_mark_work(p, m);
7968 }
7969 }
7970
7971 // We should be able to do this multi-threaded,
7972 // a chunk of stack being a task (this is
7973 // correct because each oop only ever appears
7974 once in the overflow list). However, it's
7975 // not very easy to completely overlap this with
7976 // other operations, so will generally not be done
7977 // until all work's been completed. Because we
7978 // expect the preserved oop stack (set) to be small,
7979 // it's probably fine to do this single-threaded.
7980 // We can explore cleverer concurrent/overlapped/parallel
|