src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>


  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
  31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
  32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
  33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
  34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
  38 #include "gc_implementation/parNew/parNewGeneration.hpp"
  39 #include "gc_implementation/shared/collectorCounters.hpp"
  40 #include "gc_implementation/shared/isGCActiveMark.hpp"
  41 #include "gc_interface/collectedHeap.inline.hpp"

  42 #include "memory/cardTableRS.hpp"
  43 #include "memory/collectorPolicy.hpp"
  44 #include "memory/gcLocker.inline.hpp"
  45 #include "memory/genCollectedHeap.hpp"
  46 #include "memory/genMarkSweep.hpp"
  47 #include "memory/genOopClosures.inline.hpp"
  48 #include "memory/iterator.hpp"
  49 #include "memory/referencePolicy.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/tenuredGeneration.hpp"
  52 #include "oops/oop.inline.hpp"
  53 #include "prims/jvmtiExport.hpp"
  54 #include "runtime/globals_extension.hpp"
  55 #include "runtime/handles.inline.hpp"
  56 #include "runtime/java.hpp"
  57 #include "runtime/vmThread.hpp"
  58 #include "services/memoryService.hpp"
  59 #include "services/runtimeService.hpp"
  60 
  61 // statics
  62 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  63 bool          CMSCollector::_full_gc_requested          = false;

  64 
  65 //////////////////////////////////////////////////////////////////
  66 // In support of CMS/VM thread synchronization
  67 //////////////////////////////////////////////////////////////////
  68 // We split use of the CGC_lock into 2 "levels".
  69 // The low-level locking is of the usual CGC_lock monitor. We introduce
  70 // a higher level "token" (hereafter "CMS token") built on top of the
  71 // low level monitor (hereafter "CGC lock").
  72 // The token-passing protocol gives priority to the VM thread. The
  73 // CMS-lock doesn't provide any fairness guarantees, but clients
  74 // should ensure that it is only held for very short, bounded
  75 // durations.
  76 //
  77 // When either of the CMS thread or the VM thread is involved in
  78 // collection operations during which it does not want the other
  79 // thread to interfere, it obtains the CMS token.
  80 //
  81 // If either thread tries to get the token while the other has
  82 // it, that thread waits. However, if the VM thread and CMS thread
  83 // both want the token, then the VM thread gets priority while the


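The token protocol described above layers a "CMS token" on top of the low-level monitor, with the VM thread given priority whenever both threads want the token. Below is a minimal standalone sketch of that idea using C++11 primitives rather than HotSpot's Monitor/CGC_lock types; the class and member names (CmsToken, acquire_vm, acquire_cms) are illustrative only.

    #include <condition_variable>
    #include <mutex>

    // Illustrative only: a two-party token where the "VM" side has priority,
    // mirroring the CMS-token description above (not HotSpot's real classes).
    class CmsToken {
      std::mutex              _lock;      // plays the role of the low-level CGC lock
      std::condition_variable _cv;
      bool _held = false;                 // is the token currently held?
      bool _vm_waiting = false;           // a VM-side acquire is pending

     public:
      void acquire_vm() {                 // VM thread: gets priority
        std::unique_lock<std::mutex> l(_lock);
        _vm_waiting = true;
        _cv.wait(l, [this] { return !_held; });
        _vm_waiting = false;
        _held = true;
      }
      void acquire_cms() {                // CMS thread: defers to a waiting VM thread
        std::unique_lock<std::mutex> l(_lock);
        _cv.wait(l, [this] { return !_held && !_vm_waiting; });
        _held = true;
      }
      void release() {
        { std::lock_guard<std::mutex> l(_lock); _held = false; }
        _cv.notify_all();                 // wake whichever side is waiting
      }
    };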
 574   _ser_pmc_preclean_ovflw(0),
 575   _ser_kac_preclean_ovflw(0),
 576   _ser_pmc_remark_ovflw(0),
 577   _par_pmc_remark_ovflw(0),
 578   _ser_kac_ovflw(0),
 579   _par_kac_ovflw(0),
 580 #ifndef PRODUCT
 581   _num_par_pushes(0),
 582 #endif
 583   _collection_count_start(0),
 584   _verifying(false),
 585   _icms_start_limit(NULL),
 586   _icms_stop_limit(NULL),
 587   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 588   _completed_initialization(false),
 589   _collector_policy(cp),
 590   _should_unload_classes(false),
 591   _concurrent_cycles_since_last_unload(0),
 592   _roots_scanning_options(0),
 593   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 594   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 595 {
 596   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 597     ExplicitGCInvokesConcurrent = true;
 598   }
 599   // Now expand the span and allocate the collection support structures
 600   // (MUT, marking bit map etc.) to cover both generations subject to
 601   // collection.
 602 
 603   // For use by dirty card to oop closures.
 604   _cmsGen->cmsSpace()->set_collector(this);
 605 
 606   // Allocate MUT and marking bit map
 607   {
 608     MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
 609     if (!_markBitMap.allocate(_span)) {
 610       warning("Failed to allocate CMS Bit Map");
 611       return;
 612     }
 613     assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
 614   }


1659   }
1660 
1661   // The following "if" branch is present for defensive reasons.
1662   // In the current uses of this interface, it can be replaced with:
1663   // assert(!GC_locker::is_active(), "Can't be called otherwise");
1664   // But I am not placing that assert here to allow future
1665   // generality in invoking this interface.
1666   if (GC_locker::is_active()) {
1667     // A consistency test for GC_locker
1668     assert(GC_locker::needs_gc(), "Should have been set already");
1669     // Skip this foreground collection, instead
1670     // expanding the heap if necessary.
1671     // Need the free list locks for the call to free() in compute_new_size()
1672     compute_new_size();
1673     return;
1674   }
1675   acquire_control_and_collect(full, clear_all_soft_refs);
1676   _full_gcs_since_conc_gc++;
1677 }
1678 
1679 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1680   GenCollectedHeap* gch = GenCollectedHeap::heap();
1681   unsigned int gc_count = gch->total_full_collections();
1682   if (gc_count == full_gc_count) {
1683     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1684     _full_gc_requested = true;

1685     CGC_lock->notify();   // nudge CMS thread
1686   } else {
1687     assert(gc_count > full_gc_count, "Error: causal loop");
1688   }
1689 }
1690 
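request_full_gc() above only posts a request when no full collection has completed since the caller sampled the count; otherwise the request is treated as already satisfied (hence the "causal loop" assert). A condensed standalone sketch of that count-based coalescing, with invented names and plain C++ primitives in place of CGC_lock:

    #include <condition_variable>
    #include <mutex>

    // Illustrative sketch of count-based full-GC request coalescing (not HotSpot code).
    struct FullGcRequestSketch {
      std::mutex              lock;
      std::condition_variable cv;
      unsigned int total_full_collections = 0;  // bumped each time a full GC completes
      bool         requested = false;

      // 'observed_count' is the collection count the caller saw when it decided a
      // full GC was needed. If a full GC has completed since then, drop the request.
      void request(unsigned int observed_count) {
        std::lock_guard<std::mutex> l(lock);
        if (total_full_collections == observed_count) {
          requested = true;
          cv.notify_one();                      // nudge the collector thread
        } // else: a collection already happened; the request is moot
      }
    };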
1691 
1692 // The foreground and background collectors need to coordinate in order
1693 // to make sure that they do not mutually interfere with CMS collections.
1694 // When a background collection is active,
1695 // the foreground collector may need to take over (preempt) and
1696 // synchronously complete an ongoing collection. Depending on the
1697 // frequency of the background collections and the heap usage
1698 // of the application, this preemption can be seldom or frequent.
1699 // There are only certain
1700 // points in the background collection that the "collection-baton"
1701 // can be passed to the foreground collector.
1702 //
1703 // The foreground collector will wait for the baton before
1704 // starting any part of the collection.  The foreground collector
1705 // will only wait at one location.
1706 //
1707 // The background collector will yield the baton before starting a new
1708 // phase of the collection (e.g., before initial marking, marking from roots,
1709 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1710 // of the loop which switches the phases. The background collector does some

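In other words, the background collector polls for a baton request at each phase boundary and, if the foreground collector is waiting, hands over the rest of the collection. A rough standalone sketch of such a phase loop with yield points follows; the names and the Phase enum are invented for illustration.

    #include <atomic>

    // Illustrative phase loop with baton checks at phase boundaries,
    // as described above (not the actual CMSCollector code).
    enum Phase { InitialMark, Mark, Preclean, FinalRemark, Sweep, Reset, Done };

    struct BatonSketch {
      std::atomic<bool> foreground_wants_baton{false};  // set by the foreground collector

      void run_phase(Phase /*p*/) { /* do the work of one phase */ }

      // Returns true if the baton was handed off and this background cycle should stop.
      bool yield_if_requested() {
        return foreground_wants_baton.load();  // foreground finishes the cycle synchronously
      }

      void background_cycle() {
        for (int p = InitialMark; p != Done; ++p) {
          if (yield_if_requested()) return;    // baton passed at the head of each phase
          run_phase(static_cast<Phase>(p));
        }
      }
    };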

1828 
1829   // Check if we need to do a compaction, or if not, whether
1830   // we need to start the mark-sweep from scratch.
1831   bool should_compact    = false;
1832   bool should_start_over = false;
1833   decide_foreground_collection_type(clear_all_soft_refs,
1834     &should_compact, &should_start_over);
1835 
1836 NOT_PRODUCT(
1837   if (RotateCMSCollectionTypes) {
1838     if (_cmsGen->debug_collection_type() ==
1839         ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1840       should_compact = true;
1841     } else if (_cmsGen->debug_collection_type() ==
1842                ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1843       should_compact = false;
1844     }
1845   }
1846 )
1847 
1848   if (PrintGCDetails && first_state > Idling) {
1849     GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1850     if (GCCause::is_user_requested_gc(cause) ||
1851         GCCause::is_serviceability_requested_gc(cause)) {
1852       gclog_or_tty->print(" (concurrent mode interrupted)");
1853     } else {
1854       gclog_or_tty->print(" (concurrent mode failure)");
1855     }
1856   }
1857 
1858   set_did_compact(should_compact);
1859   if (should_compact) {
1860     // If the collection is being acquired from the background
1861     // collector, there may be references on the discovered
1862     // references lists that have NULL referents (being those
1863     // that were concurrently cleared by a mutator) or
1864     // that are no longer active (having been enqueued concurrently
1865     // by the mutator).
1866     // Scrub the list of those references because Mark-Sweep-Compact
1867     // code assumes referents are not NULL and that all discovered
1868     // Reference objects are active.
1869     ref_processor()->clean_up_discovered_references();
1870 
1871     do_compaction_work(clear_all_soft_refs);
1872 
1873     // Has the GC time limit been exceeded?
1874     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1875     size_t max_eden_size = young_gen->max_capacity() -
1876                            young_gen->to()->capacity() -
1877                            young_gen->from()->capacity();
1878     GenCollectedHeap* gch = GenCollectedHeap::heap();
1879     GCCause::Cause gc_cause = gch->gc_cause();
1880     size_policy()->check_gc_overhead_limit(_young_gen->used(),
1881                                            young_gen->eden()->used(),
1882                                            _cmsGen->max_capacity(),
1883                                            max_eden_size,
1884                                            full,
1885                                            gc_cause,
1886                                            gch->collector_policy());
1887   } else {
1888     do_mark_sweep_work(clear_all_soft_refs, first_state,
1889       should_start_over);
1890   }
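The max_eden_size passed to check_gc_overhead_limit() above is the young generation's maximum capacity minus both survivor spaces, i.e. the largest size eden itself could reach. A worked example with invented sizes:

    // Illustrative arithmetic only; the sizes below are invented for the example.
    #include <cstddef>
    const std::size_t M          = 1024 * 1024;
    const std::size_t young_max  = 100 * M;   // young_gen->max_capacity()
    const std::size_t to_space   =  10 * M;   // young_gen->to()->capacity()
    const std::size_t from_space =  10 * M;   // young_gen->from()->capacity()
    const std::size_t max_eden   = young_max - to_space - from_space;   // 80 MB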


1954       // if necessary clear soft refs that weren't previously
1955       // cleared. We do so by remembering the phase in which
1956       // we came in, and if we are past the refs processing
1957       // phase, we'll choose to just redo the mark-sweep
1958       // collection from scratch.
1959       if (_collectorState > FinalMarking) {
1960         // We are past the refs processing phase;
1961         // start over and do a fresh synchronous CMS cycle
1962         _collectorState = Resetting; // skip to reset to start new cycle
1963         reset(false /* == !asynch */);
1964         *should_start_over = true;
1965       } // else we can continue a possibly ongoing current cycle
1966     }
1967   }
1968 }
1969 
1970 // A work method used by the foreground collector to do
1971 // a mark-sweep-compact.
1972 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1973   GenCollectedHeap* gch = GenCollectedHeap::heap();
1974   TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1975   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1976     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1977       "collections passed to foreground collector", _full_gcs_since_conc_gc);
1978   }
1979 
1980   // Sample collection interval time and reset for collection pause.
1981   if (UseAdaptiveSizePolicy) {
1982     size_policy()->msc_collection_begin();
1983   }
1984 
1985   // Temporarily widen the span of the weak reference processing to
1986   // the entire heap.
1987   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1988   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1989   // Temporarily, clear the "is_alive_non_header" field of the
1990   // reference processor.
1991   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1992   // Temporarily make reference _processing_ single threaded (non-MT).
1993   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1994   // Temporarily make refs discovery atomic

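The ReferenceProcessor*Mutator objects above are RAII helpers: each saves a setting of the reference processor in its constructor, installs a temporary value for the duration of the compaction, and restores the original in its destructor. A generic standalone sketch of that save/override/restore idiom (ScopedOverride is a hypothetical name, not a HotSpot class):

    // Generic save/override/restore guard, illustrating the idiom used by
    // ReferenceProcessorSpanMutator and friends above (hypothetical class).
    template <typename T>
    class ScopedOverride {
      T& _slot;    // the setting being temporarily changed
      T  _saved;   // original value, restored on scope exit
     public:
      ScopedOverride(T& slot, const T& temporary) : _slot(slot), _saved(slot) {
        _slot = temporary;
      }
      ~ScopedOverride() { _slot = _saved; }
    };

    // Usage sketch: temporarily widen a "span" setting for the duration of a block.
    // {
    //   ScopedOverride<MemRegionLike> widen(rp_span_field, whole_heap_region);
    //   ... run the mark-sweep-compact with the widened span ...
    // }  // original value restored here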

2045   reset(false /* == !asynch */);
2046   _cmsGen->reset_after_compaction();
2047   _concurrent_cycles_since_last_unload = 0;
2048 
2049   // Clear any data recorded in the PLAB chunk arrays.
2050   if (_survivor_plab_array != NULL) {
2051     reset_survivor_plab_arrays();
2052   }
2053 
2054   // Adjust the per-size allocation stats for the next epoch.
2055   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2056   // Restart the "inter sweep timer" for the next epoch.
2057   _inter_sweep_timer.reset();
2058   _inter_sweep_timer.start();
2059 
2060   // Sample collection pause time and reset for collection interval.
2061   if (UseAdaptiveSizePolicy) {
2062     size_policy()->msc_collection_end(gch->gc_cause());
2063   }
2064 
2065   // For a mark-sweep-compact, compute_new_size() will be called
2066   // in the heap's do_collection() method.
2067 }
2068 
2069 // A work method used by the foreground collector to do
2070 // a mark-sweep, after taking over from a possibly on-going
2071 // concurrent mark-sweep collection.
2072 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2073   CollectorState first_state, bool should_start_over) {
2074   if (PrintGC && Verbose) {
2075     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2076       "collector with count %d",
2077       _full_gcs_since_conc_gc);
2078   }
2079   switch (_collectorState) {
2080     case Idling:
2081       if (first_state == Idling || should_start_over) {
2082         // The background GC was not active, or should be
2083         // restarted from scratch; start the cycle.
2084         _collectorState = InitialMarking;
2085       }
2086       // If first_state was not Idling, then a background GC
2087       // was in progress and has now finished.  No need to do it
2088       // again.  Leave the state as Idling.
2089       break;
2090     case Precleaning:
2091       // In the foreground case don't do the precleaning since
2092       // it is not done concurrently and there is extra work
2093       // required.
2094       _collectorState = FinalMarking;
2095   }
2096   collect_in_foreground(clear_all_soft_refs);
2097 
2098   // For a mark-sweep, compute_new_size() will be called
2099   // in the heap's do_collection() method.
2100 }
2101 
2102 
2103 void CMSCollector::getFreelistLocks() const {
2104   // Get locks for all free lists in all generations that this
2105   // collector is responsible for
2106   _cmsGen->freelistLock()->lock_without_safepoint_check();
2107 }
2108 
2109 void CMSCollector::releaseFreelistLocks() const {
2110   // Release locks for all free lists in all generations that this
2111   // collector is responsible for
2112   _cmsGen->freelistLock()->unlock();
2113 }
2114 
2115 bool CMSCollector::haveFreelistLocks() const {
2116   // Check locks for all free lists in all generations that this


2136     if (_c->_foregroundGCIsActive) {
2137       CGC_lock->notify();
2138     }
2139     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2140            "Possible deadlock");
2141   }
2142 
2143   ~ReleaseForegroundGC() {
2144     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2145     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2146     _c->_foregroundGCShouldWait = true;
2147   }
2148 };
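ReleaseForegroundGC above is a scoped guard: for the duration of a block (for example while the background thread hands a stop-the-world operation to the VM thread) it clears _foregroundGCShouldWait and notifies a waiting foreground collector, then re-arms the flag in its destructor (the constructor body is only partly visible in this hunk). A minimal standalone sketch of that shape, using std::mutex in place of HotSpot's CGC_lock; all names are illustrative.

    #include <condition_variable>
    #include <mutex>

    // Illustrative scoped "let the other party run" guard (not HotSpot's class).
    class ScopedRelease {
      std::mutex&              _lock;
      std::condition_variable& _cv;
      bool&                    _other_should_wait;  // flag the other thread blocks on
     public:
      ScopedRelease(std::mutex& lock, std::condition_variable& cv, bool& flag)
          : _lock(lock), _cv(cv), _other_should_wait(flag) {
        std::lock_guard<std::mutex> l(_lock);
        _other_should_wait = false;  // allow the other thread to proceed
        _cv.notify_all();
      }
      ~ScopedRelease() {
        std::lock_guard<std::mutex> l(_lock);
        _other_should_wait = true;   // re-arm the flag on scope exit
      }
    };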
2149 
2150 // There are separate collect_in_background and collect_in_foreground because of
2151 // the different locking requirements of the background collector and the
2152 // foreground collector.  There was originally an attempt to share
2153 // one "collect" method between the background collector and the foreground
2154 // collector, but the amount of if-then-else logic required made it
2155 // cleaner to have separate methods.
2156 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2157   assert(Thread::current()->is_ConcurrentGC_thread(),
2158     "A CMS asynchronous collection is only allowed on a CMS thread.");
2159 
2160   GenCollectedHeap* gch = GenCollectedHeap::heap();
2161   {
2162     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2163     MutexLockerEx hl(Heap_lock, safepoint_check);
2164     FreelistLocker fll(this);
2165     MutexLockerEx x(CGC_lock, safepoint_check);
2166     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2167       // The foreground collector is active or we're
2168       // not using asynchronous collections.  Skip this
2169       // background collection.
2170       assert(!_foregroundGCShouldWait, "Should be clear");
2171       return;
2172     } else {
2173       assert(_collectorState == Idling, "Should be idling before start.");
2174       _collectorState = InitialMarking;

2175       // Reset the expansion cause, now that we are about to begin
2176       // a new cycle.
2177       clear_expansion_cause();
2178 
2179       // Clear the MetaspaceGC flag since a concurrent collection
2180       // is starting but also clear it after the collection.
2181       MetaspaceGC::set_should_concurrent_collect(false);
2182     }
2183     // Decide if we want to enable class unloading as part of the
2184     // ensuing concurrent GC cycle.
2185     update_should_unload_classes();
2186     _full_gc_requested = false;           // acks all outstanding full gc requests

2187     // Signal that we are about to start a collection
2188     gch->increment_total_full_collections();  // ... starting a collection cycle
2189     _collection_count_start = gch->total_full_collections();
2190   }
2191 
2192   // Used for PrintGC
2193   size_t prev_used;
2194   if (PrintGC && Verbose) {
2195     prev_used = _cmsGen->used(); // XXXPERM
2196   }
2197 
2198   // The change of the collection state is normally done at this level;
2199   // the exceptions are phases that are executed while the world is
2200   // stopped.  For those phases the change of state is done while the
2201   // world is stopped.  For baton passing purposes this allows the
2202   // background collector to finish the phase and change state atomically.
2203   // The foreground collector cannot wait on a phase that is done
2204   // while the world is stopped because the foreground collector already
2205   // has the world stopped and would deadlock.
2206   while (_collectorState != Idling) {


2246       } else {
2247         // The background collector can run but check to see if the
2248         // foreground collector has done a collection while the
2249         // background collector was waiting to get the CGC_lock
2250         // above.  If yes, break so that _foregroundGCShouldWait
2251         // is cleared before returning.
2252         if (_collectorState == Idling) {
2253           break;
2254         }
2255       }
2256     }
2257 
2258     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2259       "should be waiting");
2260 
2261     switch (_collectorState) {
2262       case InitialMarking:
2263         {
2264           ReleaseForegroundGC x(this);
2265           stats().record_cms_begin();
2266 
2267           VM_CMS_Initial_Mark initial_mark_op(this);
2268           VMThread::execute(&initial_mark_op);
2269         }
2270         // The collector state may be any legal state at this point
2271         // since the background collector may have yielded to the
2272         // foreground collector.
2273         break;
2274       case Marking:
2275         // initial marking in checkpointRootsInitialWork has been completed
2276         if (markFromRoots(true)) { // we were successful
2277           assert(_collectorState == Precleaning, "Collector state should "
2278             "have changed");
2279         } else {
2280           assert(_foregroundGCIsActive, "Internal state inconsistency");
2281         }
2282         break;
2283       case Precleaning:
2284         if (UseAdaptiveSizePolicy) {
2285           size_policy()->concurrent_precleaning_begin();
2286         }


2326         // Stop the timers for adaptive size policy for the concurrent phases
2327         if (UseAdaptiveSizePolicy) {
2328           size_policy()->concurrent_sweeping_end();
2329           size_policy()->concurrent_phases_end(gch->gc_cause(),
2330                                              gch->prev_gen(_cmsGen)->capacity(),
2331                                              _cmsGen->free());
2332         }
2333 
2334       case Resizing: {
2335         // Sweeping has been completed...
2336         // At this point the background collection has completed.
2337         // Don't move the call to compute_new_size() down
2338         // into code that might be executed if the background
2339         // collection was preempted.
2340         {
2341           ReleaseForegroundGC x(this);   // unblock FG collection
2342           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
2343           CMSTokenSync        z(true);   // not strictly needed.
2344           if (_collectorState == Resizing) {
2345             compute_new_size();

2346             _collectorState = Resetting;
2347           } else {
2348             assert(_collectorState == Idling, "The state should only change"
2349                    " because the foreground collector has finished the collection");
2350           }
2351         }
2352         break;
2353       }
2354       case Resetting:
2355         // CMS heap resizing has been completed
2356         reset(true);
2357         assert(_collectorState == Idling, "Collector state should "
2358           "have changed");
2359 
2360         MetaspaceGC::set_should_concurrent_collect(false);
2361 
2362         stats().record_cms_end();
2363         // Don't move the concurrent_phases_end() and compute_new_size()
2364         // calls to here because a preempted background collection
2365         // has its state set to "Resetting".


2384     // foreground collector is waiting, notify it, before
2385     // returning.
2386     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2387     _foregroundGCShouldWait = false;
2388     if (_foregroundGCIsActive) {
2389       CGC_lock->notify();
2390     }
2391     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2392            "Possible deadlock");
2393   }
2394   if (TraceCMSState) {
2395     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2396       " exiting collection CMS state %d",
2397       Thread::current(), _collectorState);
2398   }
2399   if (PrintGC && Verbose) {
2400     _cmsGen->print_heap_change(prev_used);
2401   }
2402 }
2403 
2404 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2405   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2406          "Foreground collector should be waiting, not executing");
2407   assert(Thread::current()->is_VM_thread(), "A foreground collection "
2408     "may only be done by the VM Thread with the world stopped");
2409   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2410          "VM thread should have CMS token");
2411 
2412   NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2413     true, gclog_or_tty);)
2414   if (UseAdaptiveSizePolicy) {
2415     size_policy()->ms_collection_begin();
2416   }
2417   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2418 
2419   HandleMark hm;  // Discard invalid handles created during verification
2420 
2421   if (VerifyBeforeGC &&
2422       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2423     Universe::verify();
2424   }
2425 
2426   // Snapshot the soft reference policy to be used in this collection cycle.
2427   ref_processor()->setup_policy(clear_all_soft_refs);
2428 
2429   bool init_mark_was_synchronous = false; // until proven otherwise
2430   while (_collectorState != Idling) {
2431     if (TraceCMSState) {
2432       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2433         Thread::current(), _collectorState);
2434     }
2435     switch (_collectorState) {
2436       case InitialMarking:

2437         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2438         checkpointRootsInitial(false);
2439         assert(_collectorState == Marking, "Collector state should have changed"
2440           " within checkpointRootsInitial()");
2441         break;
2442       case Marking:
2443         // initial marking in checkpointRootsInitialWork has been completed
2444         if (VerifyDuringGC &&
2445             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2446           Universe::verify("Verify before initial mark: ");
2447         }
2448         {
2449           bool res = markFromRoots(false);
2450           assert(res && _collectorState == FinalMarking, "Collector state should "
2451             "have changed");
2452           break;
2453         }
2454       case FinalMarking:
2455         if (VerifyDuringGC &&
2456             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {


2465         // final marking in checkpointRootsFinal has been completed
2466         if (VerifyDuringGC &&
2467             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2468           Universe::verify("Verify before sweep: ");
2469         }
2470         sweep(false);
2471         assert(_collectorState == Resizing, "Incorrect state");
2472         break;
2473       case Resizing: {
2474         // Sweeping has been completed; the actual resize in this case
2475         // is done separately; nothing to be done in this state.
2476         _collectorState = Resetting;
2477         break;
2478       }
2479       case Resetting:
2480         // The heap has been resized.
2481         if (VerifyDuringGC &&
2482             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2483           Universe::verify("Verify before reset: ");
2484         }

2485         reset(false);
2486         assert(_collectorState == Idling, "Collector state should "
2487           "have changed");
2488         break;
2489       case Precleaning:
2490       case AbortablePreclean:
2491         // Elide the preclean phase
2492         _collectorState = FinalMarking;
2493         break;
2494       default:
2495         ShouldNotReachHere();
2496     }
2497     if (TraceCMSState) {
2498       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2499         Thread::current(), _collectorState);
2500     }
2501   }
2502 
2503   if (UseAdaptiveSizePolicy) {
2504     GenCollectedHeap* gch = GenCollectedHeap::heap();


3487     if (_print_cr) {
3488       gclog_or_tty->print_cr("");
3489     }
3490     if (PrintCMSStatistics != 0) {
3491       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3492                     _collector->yields());
3493     }
3494   }
3495 }
3496 
3497 // CMS work
3498 
3499 // Checkpoint the roots into this generation from outside
3500 // this generation. [Note this initial checkpoint need only
3501 // be approximate -- we'll do a catch up phase subsequently.]
3502 void CMSCollector::checkpointRootsInitial(bool asynch) {
3503   assert(_collectorState == InitialMarking, "Wrong collector state");
3504   check_correct_thread_executing();
3505   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3506 
3507   ReferenceProcessor* rp = ref_processor();
3508   SpecializationStats::clear();
3509   assert(_restart_addr == NULL, "Control point invariant");
3510   if (asynch) {
3511     // acquire locks for subsequent manipulations
3512     MutexLockerEx x(bitMapLock(),
3513                     Mutex::_no_safepoint_check_flag);
3514     checkpointRootsInitialWork(asynch);
3515     // enable ("weak") refs discovery
3516     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3517     _collectorState = Marking;
3518   } else {
3519     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3520     // which recognizes if we are a CMS generation, and doesn't try to turn on
3521     // discovery; verify that they aren't meddling.
3522     assert(!rp->discovery_is_atomic(),
3523            "incorrect setting of discovery predicate");
3524     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3525            "ref discovery for this generation kind");
3526     // already have locks


3532   SpecializationStats::print();
3533 }
3534 
3535 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3536   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3537   assert(_collectorState == InitialMarking, "just checking");
3538 
3539   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3540   // precede our marking with a collection of all
3541   // younger generations to keep floating garbage to a minimum.
3542   // XXX: we won't do this for now -- it's an optimization to be done later.
3543 
3544   // already have locks
3545   assert_lock_strong(bitMapLock());
3546   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3547 
3548   // Setup the verification and class unloading state for this
3549   // CMS collection cycle.
3550   setup_cms_unloading_and_verification_state();
3551 
3552   NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3553     PrintGCDetails && Verbose, true, gclog_or_tty);)
3554   if (UseAdaptiveSizePolicy) {
3555     size_policy()->checkpoint_roots_initial_begin();
3556   }
3557 
3558   // Reset all the PLAB chunk arrays if necessary.
3559   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3560     reset_survivor_plab_arrays();
3561   }
3562 
3563   ResourceMark rm;
3564   HandleMark  hm;
3565 
3566   FalseClosure falseClosure;
3567   // In the case of a synchronous collection, we will elide the
3568   // remark step, so it's important to catch all the nmethod oops
3569   // in this step.
3570   // The final 'true' flag to gen_process_strong_roots will ensure this.
3571   // If 'asynch' is true, we can relax the nmethod tracing.
3572   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3573   GenCollectedHeap* gch = GenCollectedHeap::heap();


4525     // collection because we don't want to take CPU
4526     // or memory bandwidth away from the young GC threads
4527     // (which may be as many as there are CPUs).
4528     // Note that we don't need to protect ourselves from
4529     // interference with mutators because they can't
4530     // manipulate the discovered reference lists nor affect
4531     // the computed reachability of the referents, the
4532     // only properties manipulated by the precleaning
4533     // of these reference lists.
4534     stopTimer();
4535     CMSTokenSyncWithLocks x(true /* is cms thread */,
4536                             bitMapLock());
4537     startTimer();
4538     sample_eden();
4539 
4540     // The following will yield to allow foreground
4541     // collection to proceed promptly. XXX YSR:
4542     // The code in this method may need further
4543     // tweaking for better performance and some restructuring
4544     // for cleaner interfaces.

4545     rp->preclean_discovered_references(
4546           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl);

4547   }
4548 
4549   if (clean_survivor) {  // preclean the active survivor space(s)
4550     assert(_young_gen->kind() == Generation::DefNew ||
4551            _young_gen->kind() == Generation::ParNew ||
4552            _young_gen->kind() == Generation::ASParNew,
4553          "incorrect type for cast");
4554     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4555     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4556                              &_markBitMap, &_modUnionTable,
4557                              &_markStack, true /* precleaning phase */);
4558     stopTimer();
4559     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4560                              bitMapLock());
4561     startTimer();
4562     unsigned int before_count =
4563       GenCollectedHeap::heap()->total_collections();
4564     SurvivorSpacePrecleanClosure
4565       sss_cl(this, _span, &_markBitMap, &_markStack,
4566              &pam_cl, before_count, CMSYield);


4868   // world is stopped at this checkpoint
4869   assert(SafepointSynchronize::is_at_safepoint(),
4870          "world should be stopped");
4871   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4872 
4873   verify_work_stacks_empty();
4874   verify_overflow_empty();
4875 
4876   SpecializationStats::clear();
4877   if (PrintGCDetails) {
4878     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4879                         _young_gen->used() / K,
4880                         _young_gen->capacity() / K);
4881   }
4882   if (asynch) {
4883     if (CMSScavengeBeforeRemark) {
4884       GenCollectedHeap* gch = GenCollectedHeap::heap();
4885       // Temporarily set flag to false; GCH->do_collection will
4886       // expect it to be false and will set it to true
4887       FlagSetting fl(gch->_is_gc_active, false);
4888       NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4889         PrintGCDetails && Verbose, true, gclog_or_tty);)
4890       int level = _cmsGen->level() - 1;
4891       if (level >= 0) {
4892         gch->do_collection(true,        // full (i.e. force, see below)
4893                            false,       // !clear_all_soft_refs
4894                            0,           // size
4895                            false,       // is_tlab
4896                            level        // max_level
4897                           );
4898       }
4899     }
4900     FreelistLocker x(this);
4901     MutexLockerEx y(bitMapLock(),
4902                     Mutex::_no_safepoint_check_flag);
4903     assert(!init_mark_was_synchronous, "but that's impossible!");
4904     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4905   } else {
4906     // already have all the locks
4907     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4908                              init_mark_was_synchronous);
4909   }
4910   verify_work_stacks_empty();
4911   verify_overflow_empty();
4912   SpecializationStats::print();
4913 }
4914 
4915 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4916   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4917 
4918   NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4919 
4920   assert(haveFreelistLocks(), "must have free list locks");
4921   assert_lock_strong(bitMapLock());
4922 
4923   if (UseAdaptiveSizePolicy) {
4924     size_policy()->checkpoint_roots_final_begin();
4925   }
4926 
4927   ResourceMark rm;
4928   HandleMark   hm;
4929 
4930   GenCollectedHeap* gch = GenCollectedHeap::heap();
4931 
4932   if (should_unload_classes()) {
4933     CodeCache::gc_prologue();
4934   }
4935   assert(haveFreelistLocks(), "must have free list locks");
4936   assert_lock_strong(bitMapLock());
4937 
4938   if (!init_mark_was_synchronous) {


4949     // we cannot rely on TLAB's having been filled and must do
4950     // so here just in case a scavenge did not happen.
4951     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4952     // Update the saved marks which may affect the root scans.
4953     gch->save_marks();
4954 
4955     {
4956       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4957 
4958       // Note on the role of the mod union table:
4959       // Since the marker in "markFromRoots" marks concurrently with
4960       // mutators, it is possible for some reachable objects not to have been
4961       // scanned. For instance, the only reference to an object A was
4962       // placed in object B after the marker scanned B. Unless B is rescanned,
4963       // A would be collected. Such updates to references in marked objects
4964       // are detected via the mod union table which is the set of all cards
4965       // dirtied since the first checkpoint in this GC cycle and prior to
4966       // the most recent young generation GC, minus those cleaned up by the
4967       // concurrent precleaning.
4968       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
4969         TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4970         do_remark_parallel();
4971       } else {
4972         TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4973                     gclog_or_tty);
4974         do_remark_non_parallel();
4975       }
4976     }
4977   } else {
4978     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4979     // The initial mark was stop-world, so there's no rescanning to
4980     // do; go straight on to the next step below.
4981   }
4982   verify_work_stacks_empty();
4983   verify_overflow_empty();
4984 
4985   {
4986     NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4987     refProcessingWork(asynch, clear_all_soft_refs);
4988   }
4989   verify_work_stacks_empty();
4990   verify_overflow_empty();
4991 
4992   if (should_unload_classes()) {
4993     CodeCache::gc_epilogue();
4994   }
4995   JvmtiExport::gc_epilogue();
4996 
4997   // If we encountered any (marking stack / work queue) overflow
4998   // events during the current CMS cycle, take appropriate
4999   // remedial measures, where possible, so as to try and avoid
5000   // recurrence of that condition.
5001   assert(_markStack.isEmpty(), "No grey objects");
5002   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5003                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
5004   if (ser_ovflw > 0) {
5005     if (PrintCMSStatistics != 0) {
5006       gclog_or_tty->print_cr("Marking stack overflow (benign) "


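The note on the mod union table in the hunk above (the only reference to A being stored into B after B was scanned) is the standard card-based remark argument. The toy sketch below shows the shape of that bookkeeping with a plain bitset standing in for both the card table and the mod union table, collapsed into one structure here for brevity; the card size, types, and names are invented for illustration.

    #include <bitset>
    #include <cstddef>
    #include <cstdint>

    // Toy model of card-based remark bookkeeping (illustration only).
    constexpr std::size_t kCardSize = 512;    // bytes per card (a typical value)
    constexpr std::size_t kNumCards = 1024;   // covers a 512 KB toy "heap"

    std::bitset<kNumCards> dirty_cards;       // cards dirtied since the initial checkpoint

    inline std::size_t card_index(std::uintptr_t addr, std::uintptr_t heap_base) {
      return (addr - heap_base) / kCardSize;
    }

    // Mutator side: a reference store into object B dirties B's card, so the
    // remark pause knows B must be rescanned even though the marker already saw it.
    void record_reference_store(std::uintptr_t field_addr, std::uintptr_t heap_base) {
      dirty_cards.set(card_index(field_addr, heap_base));
    }

    // Remark side: rescan every object on a dirty card, then clear the card.
    template <typename RescanFn>
    void remark_dirty_cards(RescanFn rescan_card) {
      for (std::size_t i = 0; i < kNumCards; ++i) {
        if (dirty_cards.test(i)) {
          rescan_card(i);        // re-marks objects like B, keeping A reachable
          dirty_cards.reset(i);
        }
      }
    }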
5027   if (PrintCMSStatistics != 0) {
5028      if (_markStack._hit_limit > 0) {
5029        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5030                               _markStack._hit_limit);
5031      }
5032      if (_markStack._failed_double > 0) {
5033        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5034                               " current capacity "SIZE_FORMAT,
5035                               _markStack._failed_double,
5036                               _markStack.capacity());
5037      }
5038   }
5039   _markStack._hit_limit = 0;
5040   _markStack._failed_double = 0;
5041 
5042   if ((VerifyAfterGC || VerifyDuringGC) &&
5043       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5044     verify_after_remark();
5045   }
5046 


5047   // Change under the freelistLocks.
5048   _collectorState = Sweeping;
5049   // Call isAllClear() under bitMapLock
5050   assert(_modUnionTable.isAllClear(),
5051       "Should be clear by end of the final marking");
5052   assert(_ct->klass_rem_set()->mod_union_is_clear(),
5053       "Should be clear by end of the final marking");
5054   if (UseAdaptiveSizePolicy) {
5055     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5056   }
5057 }
5058 
5059 // Parallel remark task
5060 class CMSParRemarkTask: public AbstractGangTask {
5061   CMSCollector* _collector;
5062   int           _n_workers;
5063   CompactibleFreeListSpace* _cms_space;
5064 
5065   // The per-thread work queues, available here for stealing.
5066   OopTaskQueueSet*       _task_queues;


5680   // as a result of work_q overflow
5681   restore_preserved_marks_if_any();
5682 }
5683 
5684 // Non-parallel version of remark
5685 void CMSCollector::do_remark_non_parallel() {
5686   ResourceMark rm;
5687   HandleMark   hm;
5688   GenCollectedHeap* gch = GenCollectedHeap::heap();
5689   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5690 
5691   MarkRefsIntoAndScanClosure
5692     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5693              &_markStack, this,
5694              false /* should_yield */, false /* not precleaning */);
5695   MarkFromDirtyCardsClosure
5696     markFromDirtyCardsClosure(this, _span,
5697                               NULL,  // space is set further below
5698                               &_markBitMap, &_markStack, &mrias_cl);
5699   {
5700     TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5701     // Iterate over the dirty cards, setting the corresponding bits in the
5702     // mod union table.
5703     {
5704       ModUnionClosure modUnionClosure(&_modUnionTable);
5705       _ct->ct_bs()->dirty_card_iterate(
5706                       _cmsGen->used_region(),
5707                       &modUnionClosure);
5708     }
5709     // Having transferred these marks into the modUnionTable, we just need
5710     // to rescan the marked objects on the dirty cards in the modUnionTable.
5711     // The initial marking may have been done during an asynchronous
5712     // collection so there may be dirty bits in the mod-union table.
5713     const int alignment =
5714       CardTableModRefBS::card_size * BitsPerWord;
5715     {
5716       // ... First handle dirty cards in CMS gen
5717       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5718       MemRegion ur = _cmsGen->used_region();
5719       HeapWord* lb = ur.start();
5720       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5721       MemRegion cms_span(lb, ub);
5722       _modUnionTable.dirty_range_iterate_clear(cms_span,
5723                                                &markFromDirtyCardsClosure);
5724       verify_work_stacks_empty();
5725       if (PrintCMSStatistics != 0) {
5726         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5727           markFromDirtyCardsClosure.num_dirty_cards());
5728       }
5729     }
5730   }
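For reference, the alignment used just above rounds the rescan range up to card_size * BitsPerWord address units so that dirty_range_iterate_clear() can consume the mod union bitmap one whole word (one bit per card) at a time. Assuming the usual 512-byte cards and 64-bit words, that is 512 * 64 = 32768 bytes, i.e. the end of the range is rounded up to a 32 KB boundary.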
5731   if (VerifyDuringGC &&
5732       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5733     HandleMark hm;  // Discard invalid handles created during verification
5734     Universe::verify();
5735   }
5736   {
5737     TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5738 
5739     verify_work_stacks_empty();
5740 
5741     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5742     GenCollectedHeap::StrongRootsScope srs(gch);
5743     gch->gen_process_strong_roots(_cmsGen->level(),
5744                                   true,  // younger gens as roots
5745                                   false, // use the local StrongRootsScope
5746                                   false, // not scavenging
5747                                   SharedHeap::ScanningOption(roots_scanning_options()),
5748                                   &mrias_cl,
5749                                   true,   // walk code active on stacks
5750                                   NULL,
5751                                   NULL);  // The dirty klasses will be handled below
5752 
5753     assert(should_unload_classes()
5754            || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5755            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5756   }
5757 
5758   {
5759     TraceTime t("visit unhandled CLDs", PrintGCDetails, false, gclog_or_tty);
5760 
5761     verify_work_stacks_empty();
5762 
5763     // Scan all class loader data objects that might have been introduced
5764     // during concurrent marking.
5765     ResourceMark rm;
5766     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5767     for (int i = 0; i < array->length(); i++) {
5768       mrias_cl.do_class_loader_data(array->at(i));
5769     }
5770 
5771     // We don't need to keep track of new CLDs anymore.
5772     ClassLoaderDataGraph::remember_new_clds(false);
5773 
5774     verify_work_stacks_empty();
5775   }
5776 
5777   {
5778     TraceTime t("dirty klass scan", PrintGCDetails, false, gclog_or_tty);
5779 
5780     verify_work_stacks_empty();
5781 
5782     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5783     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5784 
5785     verify_work_stacks_empty();
5786   }
5787 
5788   // We might have added oops to ClassLoaderData::_handles during the
5789   // concurrent marking phase. These oops point to newly allocated objects
5790   // that are guaranteed to be kept alive, either by the direct allocation
5791   // code or when the young collector processes the strong roots. Hence,
5792   // we don't have to revisit the _handles block during the remark phase.
5793 
5794   verify_work_stacks_empty();
5795   // Restore evacuated mark words, if any, used for overflow list links
5796   if (!CMSOverflowEarlyRestoration) {
5797     restore_preserved_marks_if_any();
5798   }


5960 }
5961 
5962 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5963 
5964   ResourceMark rm;
5965   HandleMark   hm;
5966 
5967   ReferenceProcessor* rp = ref_processor();
5968   assert(rp->span().equals(_span), "Spans should be equal");
5969   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5970   // Process weak references.
5971   rp->setup_policy(clear_all_soft_refs);
5972   verify_work_stacks_empty();
5973 
5974   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5975                                           &_markStack, false /* !preclean */);
5976   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5977                                 _span, &_markBitMap, &_markStack,
5978                                 &cmsKeepAliveClosure, false /* !preclean */);
5979   {
5980     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);


5981     if (rp->processing_is_mt()) {
5982       // Set the degree of MT here.  If the discovery is done MT, there
5983       // may have been a different number of threads doing the discovery
5984       // and a different number of discovered lists may have Ref objects.
5985       // That is OK as long as the Reference lists are balanced (see
5986       // balance_all_queues() and balance_queues()).
5987       GenCollectedHeap* gch = GenCollectedHeap::heap();
5988       int active_workers = ParallelGCThreads;
5989       FlexibleWorkGang* workers = gch->workers();
5990       if (workers != NULL) {
5991         active_workers = workers->active_workers();
5992         // The expectation is that active_workers will have already
5993         // been set to a reasonable value.  If it has not been set,
5994         // investigate.
5995         assert(active_workers > 0, "Should have been set during scavenge");
5996       }
5997       rp->set_active_mt_degree(active_workers);
5998       CMSRefProcTaskExecutor task_executor(*this);
5999       rp->process_discovered_references(&_is_alive_closure,
6000                                         &cmsKeepAliveClosure,
6001                                         &cmsDrainMarkingStackClosure,
6002                                         &task_executor);

6003     } else {
6004       rp->process_discovered_references(&_is_alive_closure,
6005                                         &cmsKeepAliveClosure,
6006                                         &cmsDrainMarkingStackClosure,
6007                                         NULL);

6008     }


6009   }
6010 
6011   // This is the point where the entire marking should have completed.
6012   verify_work_stacks_empty();
6013 
6014   if (should_unload_classes()) {
6015     {
6016       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
6017 
6018       // Unload classes and purge the SystemDictionary.
6019       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6020 
6021       // Unload nmethods.
6022       CodeCache::do_unloading(&_is_alive_closure, purged_class);
6023 
6024       // Prune dead klasses from subklass/sibling/implementor lists.
6025       Klass::clean_weak_klass_links(&_is_alive_closure);
6026     }
6027 
6028     {
6029       TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty);
6030       // Clean up unreferenced symbols in symbol table.
6031       SymbolTable::unlink();
6032     }
6033   }
6034 
6035   // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
6036   // Need to check if we really scanned the StringTable.
6037   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
6038     TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
6039     // Delete entries for dead interned strings.
6040     StringTable::unlink(&_is_alive_closure);
6041   }
6042 
6043   // Restore any preserved marks as a result of mark stack or
6044   // work queue overflow
6045   restore_preserved_marks_if_any();  // done single-threaded for now
6046 
6047   rp->set_enqueuing_is_done(true);
6048   if (rp->processing_is_mt()) {
6049     rp->balance_all_queues();
6050     CMSRefProcTaskExecutor task_executor(*this);
6051     rp->enqueue_discovered_references(&task_executor);
6052   } else {
6053     rp->enqueue_discovered_references(NULL);
6054   }
6055   rp->verify_no_references_recorded();
6056   assert(!rp->discovery_enabled(), "should have been disabled");
6057 }
6058 


6363     // and count.
6364     sp->reset_gc_overhead_limit_count();
6365     _collectorState = Idling;
6366   } else {
6367     // already have the lock
6368     assert(_collectorState == Resetting, "just checking");
6369     assert_lock_strong(bitMapLock());
6370     _markBitMap.clear_all();
6371     _collectorState = Idling;
6372   }
6373 
6374   // Stop incremental mode after a cycle completes, so that any future cycles
6375   // are triggered by allocation.
6376   stop_icms();
6377 
6378   NOT_PRODUCT(
6379     if (RotateCMSCollectionTypes) {
6380       _cmsGen->rotate_debug_collection_type();
6381     }
6382   )


6383 }
6384 
6385 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6386   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6387   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6388   TraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
6389   TraceCollectorStats tcs(counters());
6390 
6391   switch (op) {
6392     case CMS_op_checkpointRootsInitial: {
6393       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6394       checkpointRootsInitial(true);       // asynch
6395       if (PrintGC) {
6396         _cmsGen->printOccupancy("initial-mark");
6397       }
6398       break;
6399     }
6400     case CMS_op_checkpointRootsFinal: {
6401       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6402       checkpointRootsFinal(true,    // asynch
6403                            false,   // !clear_all_soft_refs
6404                            false);  // !init_mark_was_synchronous
6405       if (PrintGC) {
6406         _cmsGen->printOccupancy("remark");
6407       }
6408       break;




  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
  31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
  32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
  33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
  34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
  38 #include "gc_implementation/parNew/parNewGeneration.hpp"
  39 #include "gc_implementation/shared/collectorCounters.hpp"
  40 #include "gc_implementation/shared/gcTimer.hpp"
  41 #include "gc_implementation/shared/gcTrace.hpp"
  42 #include "gc_implementation/shared/gcTraceTime.hpp"
  43 #include "gc_implementation/shared/isGCActiveMark.hpp"
  44 #include "gc_interface/collectedHeap.inline.hpp"
  45 #include "memory/allocation.hpp"
  46 #include "memory/cardTableRS.hpp"
  47 #include "memory/collectorPolicy.hpp"
  48 #include "memory/gcLocker.inline.hpp"
  49 #include "memory/genCollectedHeap.hpp"
  50 #include "memory/genMarkSweep.hpp"
  51 #include "memory/genOopClosures.inline.hpp"
  52 #include "memory/iterator.hpp"
  53 #include "memory/referencePolicy.hpp"
  54 #include "memory/resourceArea.hpp"
  55 #include "memory/tenuredGeneration.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "prims/jvmtiExport.hpp"
  58 #include "runtime/globals_extension.hpp"
  59 #include "runtime/handles.inline.hpp"
  60 #include "runtime/java.hpp"
  61 #include "runtime/vmThread.hpp"
  62 #include "services/memoryService.hpp"
  63 #include "services/runtimeService.hpp"
  64 
  65 // statics
  66 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  67 bool CMSCollector::_full_gc_requested = false;
  68 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  69 
  70 //////////////////////////////////////////////////////////////////
  71 // In support of CMS/VM thread synchronization
  72 //////////////////////////////////////////////////////////////////
  73 // We split use of the CGC_lock into 2 "levels".
  74 // The low-level locking is of the usual CGC_lock monitor. We introduce
  75 // a higher level "token" (hereafter "CMS token") built on top of the
  76 // low level monitor (hereafter "CGC lock").
  77 // The token-passing protocol gives priority to the VM thread. The
  78 // CMS-lock doesn't provide any fairness guarantees, but clients
  79 // should ensure that it is only held for very short, bounded
  80 // durations.
  81 //
  82 // When either of the CMS thread or the VM thread is involved in
  83 // collection operations during which it does not want the other
  84 // thread to interfere, it obtains the CMS token.
  85 //
  86 // If either thread tries to get the token while the other has
  87 // it, that thread waits. However, if the VM thread and CMS thread
  88 // both want the token, then the VM thread gets priority while the


 579   _ser_pmc_preclean_ovflw(0),
 580   _ser_kac_preclean_ovflw(0),
 581   _ser_pmc_remark_ovflw(0),
 582   _par_pmc_remark_ovflw(0),
 583   _ser_kac_ovflw(0),
 584   _par_kac_ovflw(0),
 585 #ifndef PRODUCT
 586   _num_par_pushes(0),
 587 #endif
 588   _collection_count_start(0),
 589   _verifying(false),
 590   _icms_start_limit(NULL),
 591   _icms_stop_limit(NULL),
 592   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 593   _completed_initialization(false),
 594   _collector_policy(cp),
 595   _should_unload_classes(false),
 596   _concurrent_cycles_since_last_unload(0),
 597   _roots_scanning_options(0),
 598   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 599   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 600   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 601   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 602   _cms_start_registered(false)
 603 {
 604   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 605     ExplicitGCInvokesConcurrent = true;
 606   }
 607   // Now expand the span and allocate the collection support structures
 608   // (MUT, marking bit map etc.) to cover both generations subject to
 609   // collection.
 610 
 611   // For use by dirty card to oop closures.
 612   _cmsGen->cmsSpace()->set_collector(this);
 613 
 614   // Allocate MUT and marking bit map
 615   {
 616     MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
 617     if (!_markBitMap.allocate(_span)) {
 618       warning("Failed to allocate CMS Bit Map");
 619       return;
 620     }
 621     assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
 622   }


1667   }
1668 
1669   // The following "if" branch is present for defensive reasons.
1670   // In the current uses of this interface, it can be replaced with:
1671   // assert(!GC_locker::is_active(), "Can't be called otherwise");
1672   // But I am not placing that assert here to allow future
1673   // generality in invoking this interface.
1674   if (GC_locker::is_active()) {
1675     // A consistency test for GC_locker
1676     assert(GC_locker::needs_gc(), "Should have been set already");
1677     // Skip this foreground collection, instead
1678     // expanding the heap if necessary.
1679     // Need the free list locks for the call to free() in compute_new_size()
1680     compute_new_size();
1681     return;
1682   }
1683   acquire_control_and_collect(full, clear_all_soft_refs);
1684   _full_gcs_since_conc_gc++;
1685 }
1686 
1687 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1688   GenCollectedHeap* gch = GenCollectedHeap::heap();
1689   unsigned int gc_count = gch->total_full_collections();
1690   if (gc_count == full_gc_count) {
1691     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1692     _full_gc_requested = true;
1693     _full_gc_cause = cause;
1694     CGC_lock->notify();   // nudge CMS thread
1695   } else {
1696     assert(gc_count > full_gc_count, "Error: causal loop");
1697   }
1698 }
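
The request path above is a classic monitor hand-off: the requester records the cause, sets a flag, and notifies the collector thread, which consumes the request under the same lock. Below is a minimal, self-contained C++ sketch of that hand-off using only the standard library; GcRequest, request_full_gc and collector_loop here are hypothetical stand-ins for CGC_lock, _full_gc_requested, _full_gc_cause and the CMS thread, not HotSpot code.

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Hypothetical stand-ins for CGC_lock, _full_gc_requested and _full_gc_cause.
struct GcRequest {
  std::mutex              lock;          // plays the role of CGC_lock
  std::condition_variable cv;
  bool                    requested = false;
  const char*             cause     = "no_gc";
};

// Requester side, analogous to request_full_gc(): record the cause and
// nudge the collector thread.
void request_full_gc(GcRequest& req, const char* cause) {
  std::lock_guard<std::mutex> guard(req.lock);
  req.requested = true;
  req.cause     = cause;
  req.cv.notify_one();
}

// Collector side: wait for a request, acknowledge it, then "collect".
void collector_loop(GcRequest& req) {
  std::unique_lock<std::mutex> guard(req.lock);
  req.cv.wait(guard, [&] { return req.requested; });
  req.requested = false;                 // acks the outstanding request
  std::cout << "collecting, cause = " << req.cause << "\n";
}

int main() {
  GcRequest req;
  std::thread collector(collector_loop, std::ref(req));
  request_full_gc(req, "System.gc");
  collector.join();
  return 0;
}

Because the flag is checked in the wait predicate, a request posted before the collector thread starts waiting is not lost.
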
1699 
1700 bool CMSCollector::is_external_interruption() {
1701   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1702   return GCCause::is_user_requested_gc(cause) ||
1703          GCCause::is_serviceability_requested_gc(cause);
1704 }
1705 
1706 void CMSCollector::report_concurrent_mode_interruption() {
1707   if (is_external_interruption()) {
1708     if (PrintGCDetails) {
1709       gclog_or_tty->print(" (concurrent mode interrupted)");
1710     }
1711   } else {
1712     if (PrintGCDetails) {
1713       gclog_or_tty->print(" (concurrent mode failure)");
1714     }
1715     _gc_tracer_cm->report_concurrent_mode_failure();
1716   }
1717 }
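
The two reporting branches above distinguish an externally requested takeover (a user or serviceability GC) from a genuine concurrent mode failure, and only the latter is reported to the CMS tracer. A small stand-alone sketch of that classification, with a hypothetical Cause enum standing in for GCCause::Cause:

#include <iostream>

// Hypothetical GC causes, standing in for GCCause::Cause.
enum class Cause { no_gc, java_lang_system_gc, jvmti_force_gc, allocation_failure };

// Mirrors is_external_interruption(): only causes raised from outside the
// collector (user or serviceability requests) count as interruptions.
bool is_external_interruption(Cause cause) {
  return cause == Cause::java_lang_system_gc ||
         cause == Cause::jvmti_force_gc;
}

// Mirrors report_concurrent_mode_interruption(): an external request is
// logged as an interruption; anything else is a real concurrent mode
// failure and would also be reported to the GC tracer in HotSpot.
void report_concurrent_mode_interruption(Cause cause) {
  if (is_external_interruption(cause)) {
    std::cout << " (concurrent mode interrupted)\n";
  } else {
    std::cout << " (concurrent mode failure)\n";   // tracer event in HotSpot
  }
}

int main() {
  report_concurrent_mode_interruption(Cause::java_lang_system_gc);
  report_concurrent_mode_interruption(Cause::allocation_failure);
  return 0;
}
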
1718 
1719 
1720 // The foreground and background collectors need to coordinate in order
1721 // to make sure that they do not mutually interfere with CMS collections.
1722 // When a background collection is active,
1723 // the foreground collector may need to take over (preempt) and
1724 // synchronously complete an ongoing collection. Depending on the
1725 // frequency of the background collections and the heap usage
1726 // of the application, this preemption may occur rarely or frequently.
1727 // There are only certain
1728 // points in the background collection at which the "collection-baton"
1729 // can be passed to the foreground collector.
1730 //
1731 // The foreground collector will wait for the baton before
1732 // starting any part of the collection.  The foreground collector
1733 // will only wait at one location.
1734 //
1735 // The background collector will yield the baton before starting a new
1736 // phase of the collection (e.g., before initial marking, marking from roots,
1737 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1738 // of the loop which switches the phases. The background collector does some

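As a rough model of the baton protocol described above, the sketch below has a background thread that checks for a pending foreground takeover at each phase boundary and yields, while the foreground thread waits at a single point until the baton is yielded or the cycle completes on its own. It is standard-library C++ only; Baton, background_cycle and foreground_collect are invented names, and the real protocol (CGC_lock, _foregroundGCIsActive, _foregroundGCShouldWait) has considerably more states.

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

// Hypothetical model of the collection-baton hand-off; not HotSpot code.
struct Baton {
  std::mutex              lock;
  std::condition_variable cv;
  bool foreground_wants_baton = false;   // set by the foreground collector
  bool background_yielded     = false;   // set by the background collector
  bool background_done        = false;   // background cycle has completed
};

// Background collector: offers the baton before starting each phase.
void background_cycle(Baton& b, const std::vector<const char*>& phases) {
  for (const char* phase : phases) {
    {
      std::unique_lock<std::mutex> guard(b.lock);
      if (b.foreground_wants_baton) {
        b.background_yielded = true;
        b.cv.notify_all();
        // Wait until the foreground collection has completed.
        b.cv.wait(guard, [&] { return !b.foreground_wants_baton; });
        b.background_yielded = false;
      }
    }
    std::cout << "background phase: " << phase << "\n";
  }
  std::lock_guard<std::mutex> guard(b.lock);
  b.background_done = true;
  b.cv.notify_all();
}

// Foreground collector: waits at a single point for the baton (or for the
// background cycle to finish on its own).
void foreground_collect(Baton& b) {
  std::unique_lock<std::mutex> guard(b.lock);
  b.foreground_wants_baton = true;
  b.cv.notify_all();
  b.cv.wait(guard, [&] { return b.background_yielded || b.background_done; });
  std::cout << "foreground collection runs\n";
  b.foreground_wants_baton = false;
  b.cv.notify_all();
}

int main() {
  Baton baton;
  const std::vector<const char*> phases = {"initial mark", "mark from roots",
                                           "preclean", "final re-mark",
                                           "sweep", "reset"};
  std::thread background(background_cycle, std::ref(baton), std::cref(phases));
  std::thread foreground(foreground_collect, std::ref(baton));
  background.join();
  foreground.join();
  return 0;
}
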

1856 
1857   // Check if we need to do a compaction, or if not, whether
1858   // we need to start the mark-sweep from scratch.
1859   bool should_compact    = false;
1860   bool should_start_over = false;
1861   decide_foreground_collection_type(clear_all_soft_refs,
1862     &should_compact, &should_start_over);
1863 
1864 NOT_PRODUCT(
1865   if (RotateCMSCollectionTypes) {
1866     if (_cmsGen->debug_collection_type() ==
1867         ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1868       should_compact = true;
1869     } else if (_cmsGen->debug_collection_type() ==
1870                ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1871       should_compact = false;
1872     }
1873   }
1874 )
1875 
1876   if (first_state > Idling) {
1877     report_concurrent_mode_interruption();






1878   }
1879 
1880   set_did_compact(should_compact);
1881   if (should_compact) {
1882     // If the collection is being acquired from the background
1883     // collector, there may be references on the discovered
1884     // references lists that have NULL referents (being those
1885     // that were concurrently cleared by a mutator) or
1886     // that are no longer active (having been enqueued concurrently
1887     // by the mutator).
1888     // Scrub the list of those references because Mark-Sweep-Compact
1889     // code assumes referents are not NULL and that all discovered
1890     // Reference objects are active.
1891     ref_processor()->clean_up_discovered_references();
1892 
1893     if (first_state > Idling) {
1894       save_heap_summary();
1895     }
1896 
1897     do_compaction_work(clear_all_soft_refs);
1898 
1899     // Has the GC time limit been exceeded?
1900     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1901     size_t max_eden_size = young_gen->max_capacity() -
1902                            young_gen->to()->capacity() -
1903                            young_gen->from()->capacity();
1904     GenCollectedHeap* gch = GenCollectedHeap::heap();
1905     GCCause::Cause gc_cause = gch->gc_cause();
1906     size_policy()->check_gc_overhead_limit(_young_gen->used(),
1907                                            young_gen->eden()->used(),
1908                                            _cmsGen->max_capacity(),
1909                                            max_eden_size,
1910                                            full,
1911                                            gc_cause,
1912                                            gch->collector_policy());
1913   } else {
1914     do_mark_sweep_work(clear_all_soft_refs, first_state,
1915       should_start_over);
1916   }


1980       // if necessary clear soft refs that weren't previously
1981       // cleared. We do so by remembering the phase in which
1982       // we came in, and if we are past the refs processing
1983       // phase, we'll choose to just redo the mark-sweep
1984       // collection from scratch.
1985       if (_collectorState > FinalMarking) {
1986         // We are past the refs processing phase;
1987         // start over and do a fresh synchronous CMS cycle
1988         _collectorState = Resetting; // skip to reset to start new cycle
1989         reset(false /* == !asynch */);
1990         *should_start_over = true;
1991       } // else we can continue a possibly ongoing current cycle
1992     }
1993   }
1994 }
1995 
1996 // A work method used by the foreground collector to do
1997 // a mark-sweep-compact.
1998 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1999   GenCollectedHeap* gch = GenCollectedHeap::heap();
2000 
2001   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
2002   gc_timer->register_gc_start(os::elapsed_counter());
2003 
2004   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
2005   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2006 
2007   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
2008   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2009     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2010       "collections passed to foreground collector", _full_gcs_since_conc_gc);
2011   }
2012 
2013   // Sample collection interval time and reset for collection pause.
2014   if (UseAdaptiveSizePolicy) {
2015     size_policy()->msc_collection_begin();
2016   }
2017 
2018   // Temporarily widen the span of the weak reference processing to
2019   // the entire heap.
2020   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2021   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2022   // Temporarily, clear the "is_alive_non_header" field of the
2023   // reference processor.
2024   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2025   // Temporarily make reference _processing_ single threaded (non-MT).
2026   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2027   // Temporarily make refs discovery atomic


2078   reset(false /* == !asynch */);
2079   _cmsGen->reset_after_compaction();
2080   _concurrent_cycles_since_last_unload = 0;
2081 
2082   // Clear any data recorded in the PLAB chunk arrays.
2083   if (_survivor_plab_array != NULL) {
2084     reset_survivor_plab_arrays();
2085   }
2086 
2087   // Adjust the per-size allocation stats for the next epoch.
2088   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2089   // Restart the "inter sweep timer" for the next epoch.
2090   _inter_sweep_timer.reset();
2091   _inter_sweep_timer.start();
2092 
2093   // Sample collection pause time and reset for collection interval.
2094   if (UseAdaptiveSizePolicy) {
2095     size_policy()->msc_collection_end(gch->gc_cause());
2096   }
2097 
2098   gc_timer->register_gc_end(os::elapsed_counter());
2099 
2100   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2101 
2102   // For a mark-sweep-compact, compute_new_size() will be called
2103   // in the heap's do_collection() method.
2104 }
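
The ReferenceProcessorSpanMutator / IsAliveMutator / MTProcMutator objects used above are scoped guards: each saves a reference-processor setting, overrides it for the duration of the compaction, and restores it when the scope unwinds, even on early return. A minimal generic sketch of that idiom; ScopedOverride and RefProcessorModel are hypothetical types, not the HotSpot classes.

#include <cassert>
#include <iostream>

// Scoped guard that overrides a field for the dynamic extent of a block and
// restores the previous value on exit.
template <typename T>
class ScopedOverride {
  T* _field;
  T  _saved;
 public:
  ScopedOverride(T* field, const T& temporary_value)
      : _field(field), _saved(*field) {
    *_field = temporary_value;
  }
  ~ScopedOverride() { *_field = _saved; }
};

struct RefProcessorModel {          // hypothetical, not the HotSpot class
  bool processing_is_mt    = true;
  bool discovery_is_atomic = false;
};

int main() {
  RefProcessorModel rp;
  {
    // Temporarily make reference processing single threaded and discovery
    // atomic, as the foreground compaction does, restoring both afterwards.
    ScopedOverride<bool> single_threaded(&rp.processing_is_mt, false);
    ScopedOverride<bool> atomic_discovery(&rp.discovery_is_atomic, true);
    assert(!rp.processing_is_mt && rp.discovery_is_atomic);
    std::cout << "compaction work runs with overridden settings\n";
  }
  assert(rp.processing_is_mt && !rp.discovery_is_atomic);
  std::cout << "settings restored\n";
  return 0;
}
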
2105 
2106 // A work method used by the foreground collector to do
2107 // a mark-sweep, after taking over from a possibly on-going
2108 // concurrent mark-sweep collection.
2109 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2110   CollectorState first_state, bool should_start_over) {
2111   if (PrintGC && Verbose) {
2112     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2113       "collector with count %d",
2114       _full_gcs_since_conc_gc);
2115   }
2116   switch (_collectorState) {
2117     case Idling:
2118       if (first_state == Idling || should_start_over) {
2119         // The background GC was not active, or should be
2120         // restarted from scratch; start the cycle.
2121         _collectorState = InitialMarking;
2122       }
2123       // If first_state was not Idling, then a background GC
2124       // was in progress and has now finished.  No need to do it
2125       // again.  Leave the state as Idling.
2126       break;
2127     case Precleaning:
2128       // In the foreground case don't do the precleaning since
2129       // it is not done concurrently and there is extra work
2130       // required.
2131       _collectorState = FinalMarking;
2132   }
2133   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2134 
2135   // For a mark-sweep, compute_new_size() will be called
2136   // in the heap's do_collection() method.
2137 }
2138 
2139 
2140 void CMSCollector::getFreelistLocks() const {
2141   // Get locks for all free lists in all generations that this
2142   // collector is responsible for
2143   _cmsGen->freelistLock()->lock_without_safepoint_check();
2144 }
2145 
2146 void CMSCollector::releaseFreelistLocks() const {
2147   // Release locks for all free lists in all generations that this
2148   // collector is responsible for
2149   _cmsGen->freelistLock()->unlock();
2150 }
2151 
2152 bool CMSCollector::haveFreelistLocks() const {
2153   // Check locks for all free lists in all generations that this


2173     if (_c->_foregroundGCIsActive) {
2174       CGC_lock->notify();
2175     }
2176     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2177            "Possible deadlock");
2178   }
2179 
2180   ~ReleaseForegroundGC() {
2181     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2182     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2183     _c->_foregroundGCShouldWait = true;
2184   }
2185 };
2186 
2187 // There are separate collect_in_background and collect_in_foreground because of
2188 // the different locking requirements of the background collector and the
2189 // foreground collector.  There was originally an attempt to share
2190 // one "collect" method between the background collector and the foreground
2191 // collector, but the amount of if-then-else logic required made it
2192 // cleaner to keep the two methods separate.
2193 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2194   assert(Thread::current()->is_ConcurrentGC_thread(),
2195     "A CMS asynchronous collection is only allowed on a CMS thread.");
2196 
2197   GenCollectedHeap* gch = GenCollectedHeap::heap();
2198   {
2199     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2200     MutexLockerEx hl(Heap_lock, safepoint_check);
2201     FreelistLocker fll(this);
2202     MutexLockerEx x(CGC_lock, safepoint_check);
2203     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2204       // The foreground collector is active or we're
2205       // not using asynchronous collections.  Skip this
2206       // background collection.
2207       assert(!_foregroundGCShouldWait, "Should be clear");
2208       return;
2209     } else {
2210       assert(_collectorState == Idling, "Should be idling before start.");
2211       _collectorState = InitialMarking;
2212       register_gc_start(cause);
2213       // Reset the expansion cause, now that we are about to begin
2214       // a new cycle.
2215       clear_expansion_cause();
2216 
2217       // Clear the MetaspaceGC flag since a concurrent collection
2218       // is starting but also clear it after the collection.
2219       MetaspaceGC::set_should_concurrent_collect(false);
2220     }
2221     // Decide if we want to enable class unloading as part of the
2222     // ensuing concurrent GC cycle.
2223     update_should_unload_classes();
2224     _full_gc_requested = false;           // acks all outstanding full gc requests
2225     _full_gc_cause = GCCause::_no_gc;
2226     // Signal that we are about to start a collection
2227     gch->increment_total_full_collections();  // ... starting a collection cycle
2228     _collection_count_start = gch->total_full_collections();
2229   }
2230 
2231   // Used for PrintGC
2232   size_t prev_used;
2233   if (PrintGC && Verbose) {
2234     prev_used = _cmsGen->used(); // XXXPERM
2235   }
2236 
2237   // The change of the collection state is normally done at this level;
2238   // the exceptions are phases that are executed while the world is
2239   // stopped.  For those phases the change of state is done while the
2240   // world is stopped.  For baton passing purposes this allows the
2241   // background collector to finish the phase and change state atomically.
2242   // The foreground collector cannot wait on a phase that is done
2243   // while the world is stopped because the foreground collector already
2244   // has the world stopped and would deadlock.
2245   while (_collectorState != Idling) {


2285       } else {
2286         // The background collector can run but check to see if the
2287         // foreground collector has done a collection while the
2288         // background collector was waiting to get the CGC_lock
2289         // above.  If yes, break so that _foregroundGCShouldWait
2290         // is cleared before returning.
2291         if (_collectorState == Idling) {
2292           break;
2293         }
2294       }
2295     }
2296 
2297     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2298       "should be waiting");
2299 
2300     switch (_collectorState) {
2301       case InitialMarking:
2302         {
2303           ReleaseForegroundGC x(this);
2304           stats().record_cms_begin();

2305           VM_CMS_Initial_Mark initial_mark_op(this);
2306           VMThread::execute(&initial_mark_op);
2307         }
2308         // The collector state may be any legal state at this point
2309         // since the background collector may have yielded to the
2310         // foreground collector.
2311         break;
2312       case Marking:
2313         // initial marking in checkpointRootsInitialWork has been completed
2314         if (markFromRoots(true)) { // we were successful
2315           assert(_collectorState == Precleaning, "Collector state should "
2316             "have changed");
2317         } else {
2318           assert(_foregroundGCIsActive, "Internal state inconsistency");
2319         }
2320         break;
2321       case Precleaning:
2322         if (UseAdaptiveSizePolicy) {
2323           size_policy()->concurrent_precleaning_begin();
2324         }


2364         // Stop the timers for adaptive size policy for the concurrent phases
2365         if (UseAdaptiveSizePolicy) {
2366           size_policy()->concurrent_sweeping_end();
2367           size_policy()->concurrent_phases_end(gch->gc_cause(),
2368                                              gch->prev_gen(_cmsGen)->capacity(),
2369                                              _cmsGen->free());
2370         }
2371 
2372       case Resizing: {
2373         // Sweeping has been completed...
2374         // At this point the background collection has completed.
2375         // Don't move the call to compute_new_size() down
2376         // into code that might be executed if the background
2377         // collection was preempted.
2378         {
2379           ReleaseForegroundGC x(this);   // unblock FG collection
2380           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
2381           CMSTokenSync        z(true);   // not strictly needed.
2382           if (_collectorState == Resizing) {
2383             compute_new_size();
2384             save_heap_summary();
2385             _collectorState = Resetting;
2386           } else {
2387             assert(_collectorState == Idling, "The state should only change"
2388                    " because the foreground collector has finished the collection");
2389           }
2390         }
2391         break;
2392       }
2393       case Resetting:
2394         // CMS heap resizing has been completed
2395         reset(true);
2396         assert(_collectorState == Idling, "Collector state should "
2397           "have changed");
2398 
2399         MetaspaceGC::set_should_concurrent_collect(false);
2400 
2401         stats().record_cms_end();
2402         // Don't move the concurrent_phases_end() and compute_new_size()
2403         // calls to here because a preempted background collection
2404         // has its state set to "Resetting".


2423     // foreground collector is waiting, notify it, before
2424     // returning.
2425     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2426     _foregroundGCShouldWait = false;
2427     if (_foregroundGCIsActive) {
2428       CGC_lock->notify();
2429     }
2430     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2431            "Possible deadlock");
2432   }
2433   if (TraceCMSState) {
2434     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2435       " exiting collection CMS state %d",
2436       Thread::current(), _collectorState);
2437   }
2438   if (PrintGC && Verbose) {
2439     _cmsGen->print_heap_change(prev_used);
2440   }
2441 }
2442 
2443 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2444   if (!_cms_start_registered) {
2445     register_gc_start(cause);
2446   }
2447 }
2448 
2449 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2450   _cms_start_registered = true;
2451   _gc_timer_cm->register_gc_start(os::elapsed_counter());
2452   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2453 }
2454 
2455 void CMSCollector::register_gc_end() {
2456   if (_cms_start_registered) {
2457     report_heap_summary(GCWhen::AfterGC);
2458 
2459     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2460     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2461     _cms_start_registered = false;
2462   }
2463 }
2464 
2465 void CMSCollector::save_heap_summary() {
2466   GenCollectedHeap* gch = GenCollectedHeap::heap();
2467   _last_heap_summary = gch->create_heap_summary();
2468   _last_metaspace_summary = gch->create_metaspace_summary();
2469 }
2470 
2471 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2472   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
2473 }
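
register_gc_start() and register_gc_end() keep the JEP 167 timer/tracer pair balanced even though both the foreground and background paths may try to open or close a cycle; the _cms_start_registered flag makes the calls effectively idempotent. Below is a toy model of that pairing, using std::chrono in place of os::elapsed_counter(); ToyGCTimer, ToyGCTracer and ToyCollector are invented names, not the HotSpot classes.

#include <chrono>
#include <iostream>
#include <string>

// Captures the start and end ticks of a cycle.
class ToyGCTimer {
  std::chrono::steady_clock::time_point _start, _end;
 public:
  void register_gc_start() { _start = std::chrono::steady_clock::now(); }
  void register_gc_end()   { _end   = std::chrono::steady_clock::now(); }
  double duration_ms() const {
    return std::chrono::duration<double, std::milli>(_end - _start).count();
  }
};

// Turns timer data into reported events.
class ToyGCTracer {
 public:
  void report_gc_start(const std::string& cause) {
    std::cout << "GC start, cause = " << cause << "\n";
  }
  void report_gc_end(double ms) {
    std::cout << "GC end, took " << ms << " ms\n";
  }
};

class ToyCollector {
  ToyGCTimer  _timer;
  ToyGCTracer _tracer;
  bool        _start_registered = false;    // plays the _cms_start_registered role
 public:
  void register_gc_start(const std::string& cause) {
    if (_start_registered) return;          // already opened by another path
    _start_registered = true;
    _timer.register_gc_start();
    _tracer.report_gc_start(cause);
  }
  void register_gc_end() {
    if (!_start_registered) return;         // nothing to close out
    _timer.register_gc_end();
    _tracer.report_gc_end(_timer.duration_ms());
    _start_registered = false;
  }
};

int main() {
  ToyCollector c;
  c.register_gc_start("Allocation Failure");
  c.register_gc_start("System.gc");         // ignored: a cycle is already open
  c.register_gc_end();
  return 0;
}
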
2474 
2475 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2476   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2477          "Foreground collector should be waiting, not executing");
2478   assert(Thread::current()->is_VM_thread(), "A foreground collection"
2479     " may only be done by the VM Thread with the world stopped");
2480   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2481          "VM thread should have CMS token");
2482 
2483   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2484     true, NULL);)
2485   if (UseAdaptiveSizePolicy) {
2486     size_policy()->ms_collection_begin();
2487   }
2488   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2489 
2490   HandleMark hm;  // Discard invalid handles created during verification
2491 
2492   if (VerifyBeforeGC &&
2493       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2494     Universe::verify();
2495   }
2496 
2497   // Snapshot the soft reference policy to be used in this collection cycle.
2498   ref_processor()->setup_policy(clear_all_soft_refs);
2499 
2500   bool init_mark_was_synchronous = false; // until proven otherwise
2501   while (_collectorState != Idling) {
2502     if (TraceCMSState) {
2503       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2504         Thread::current(), _collectorState);
2505     }
2506     switch (_collectorState) {
2507       case InitialMarking:
2508         register_foreground_gc_start(cause);
2509         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2510         checkpointRootsInitial(false);
2511         assert(_collectorState == Marking, "Collector state should have changed"
2512           " within checkpointRootsInitial()");
2513         break;
2514       case Marking:
2515         // initial marking in checkpointRootsInitialWork has been completed
2516         if (VerifyDuringGC &&
2517             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2518           Universe::verify("Verify before initial mark: ");
2519         }
2520         {
2521           bool res = markFromRoots(false);
2522           assert(res && _collectorState == FinalMarking, "Collector state should "
2523             "have changed");
2524           break;
2525         }
2526       case FinalMarking:
2527         if (VerifyDuringGC &&
2528             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {


2537         // final marking in checkpointRootsFinal has been completed
2538         if (VerifyDuringGC &&
2539             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2540           Universe::verify("Verify before sweep: ");
2541         }
2542         sweep(false);
2543         assert(_collectorState == Resizing, "Incorrect state");
2544         break;
2545       case Resizing: {
2546         // Sweeping has been completed; the actual resize in this case
2547         // is done separately; nothing to be done in this state.
2548         _collectorState = Resetting;
2549         break;
2550       }
2551       case Resetting:
2552         // The heap has been resized.
2553         if (VerifyDuringGC &&
2554             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2555           Universe::verify("Verify before reset: ");
2556         }
2557         save_heap_summary();
2558         reset(false);
2559         assert(_collectorState == Idling, "Collector state should "
2560           "have changed");
2561         break;
2562       case Precleaning:
2563       case AbortablePreclean:
2564         // Elide the preclean phase
2565         _collectorState = FinalMarking;
2566         break;
2567       default:
2568         ShouldNotReachHere();
2569     }
2570     if (TraceCMSState) {
2571       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2572         Thread::current(), _collectorState);
2573     }
2574   }
2575 
2576   if (UseAdaptiveSizePolicy) {
2577     GenCollectedHeap* gch = GenCollectedHeap::heap();


3560     if (_print_cr) {
3561       gclog_or_tty->print_cr("");
3562     }
3563     if (PrintCMSStatistics != 0) {
3564       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3565                     _collector->yields());
3566     }
3567   }
3568 }
3569 
3570 // CMS work
3571 
3572 // Checkpoint the roots into this generation from outside
3573 // this generation. [Note this initial checkpoint need only
3574 // be approximate -- we'll do a catch up phase subsequently.]
3575 void CMSCollector::checkpointRootsInitial(bool asynch) {
3576   assert(_collectorState == InitialMarking, "Wrong collector state");
3577   check_correct_thread_executing();
3578   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
3579 
3580   save_heap_summary();
3581   report_heap_summary(GCWhen::BeforeGC);
3582 
3583   ReferenceProcessor* rp = ref_processor();
3584   SpecializationStats::clear();
3585   assert(_restart_addr == NULL, "Control point invariant");
3586   if (asynch) {
3587     // acquire locks for subsequent manipulations
3588     MutexLockerEx x(bitMapLock(),
3589                     Mutex::_no_safepoint_check_flag);
3590     checkpointRootsInitialWork(asynch);
3591     // enable ("weak") refs discovery
3592     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3593     _collectorState = Marking;
3594   } else {
3595     // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection
3596     // which recognizes if we are a CMS generation, and doesn't try to turn on
3597     // discovery; verify that they aren't meddling.
3598     assert(!rp->discovery_is_atomic(),
3599            "incorrect setting of discovery predicate");
3600     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3601            "ref discovery for this generation kind");
3602     // already have locks


3608   SpecializationStats::print();
3609 }
3610 
3611 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3612   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3613   assert(_collectorState == InitialMarking, "just checking");
3614 
3615   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3616   // precede our marking with a collection of all
3617   // younger generations to keep floating garbage to a minimum.
3618   // XXX: we won't do this for now -- it's an optimization to be done later.
3619 
3620   // already have locks
3621   assert_lock_strong(bitMapLock());
3622   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3623 
3624   // Setup the verification and class unloading state for this
3625   // CMS collection cycle.
3626   setup_cms_unloading_and_verification_state();
3627 
3628   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3629     PrintGCDetails && Verbose, true, _gc_timer_cm);)
3630   if (UseAdaptiveSizePolicy) {
3631     size_policy()->checkpoint_roots_initial_begin();
3632   }
3633 
3634   // Reset all the PLAB chunk arrays if necessary.
3635   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3636     reset_survivor_plab_arrays();
3637   }
3638 
3639   ResourceMark rm;
3640   HandleMark  hm;
3641 
3642   FalseClosure falseClosure;
3643   // In the case of a synchronous collection, we will elide the
3644   // remark step, so it's important to catch all the nmethod oops
3645   // in this step.
3646   // The final 'true' flag to gen_process_strong_roots will ensure this.
3647   // If 'async' is true, we can relax the nmethod tracing.
3648   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3649   GenCollectedHeap* gch = GenCollectedHeap::heap();


4601     // collection because we don't want to take CPU
4602     // or memory bandwidth away from the young GC threads
4603     // (which may be as many as there are CPUs).
4604     // Note that we don't need to protect ourselves from
4605     // interference with mutators because they can't
4606     // manipulate the discovered reference lists nor affect
4607     // the computed reachability of the referents, the
4608     // only properties manipulated by the precleaning
4609     // of these reference lists.
4610     stopTimer();
4611     CMSTokenSyncWithLocks x(true /* is cms thread */,
4612                             bitMapLock());
4613     startTimer();
4614     sample_eden();
4615 
4616     // The following will yield to allow foreground
4617     // collection to proceed promptly. XXX YSR:
4618     // The code in this method may need further
4619     // tweaking for better performance and some restructuring
4620     // for cleaner interfaces.
4621     GCTimer* gc_timer = NULL; // Currently not tracing concurrent phases
4622     rp->preclean_discovered_references(
4623           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4624           gc_timer);
4625   }
4626 
4627   if (clean_survivor) {  // preclean the active survivor space(s)
4628     assert(_young_gen->kind() == Generation::DefNew ||
4629            _young_gen->kind() == Generation::ParNew ||
4630            _young_gen->kind() == Generation::ASParNew,
4631          "incorrect type for cast");
4632     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4633     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4634                              &_markBitMap, &_modUnionTable,
4635                              &_markStack, true /* precleaning phase */);
4636     stopTimer();
4637     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4638                              bitMapLock());
4639     startTimer();
4640     unsigned int before_count =
4641       GenCollectedHeap::heap()->total_collections();
4642     SurvivorSpacePrecleanClosure
4643       sss_cl(this, _span, &_markBitMap, &_markStack,
4644              &pam_cl, before_count, CMSYield);


4946   // world is stopped at this checkpoint
4947   assert(SafepointSynchronize::is_at_safepoint(),
4948          "world should be stopped");
4949   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4950 
4951   verify_work_stacks_empty();
4952   verify_overflow_empty();
4953 
4954   SpecializationStats::clear();
4955   if (PrintGCDetails) {
4956     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4957                         _young_gen->used() / K,
4958                         _young_gen->capacity() / K);
4959   }
4960   if (asynch) {
4961     if (CMSScavengeBeforeRemark) {
4962       GenCollectedHeap* gch = GenCollectedHeap::heap();
4963       // Temporarily set the flag to false; GCH->do_collection
4964       // expects it to be false and will set it back to true.
4965       FlagSetting fl(gch->_is_gc_active, false);
4966       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4967         PrintGCDetails && Verbose, true, _gc_timer_cm);)
4968       int level = _cmsGen->level() - 1;
4969       if (level >= 0) {
4970         gch->do_collection(true,        // full (i.e. force, see below)
4971                            false,       // !clear_all_soft_refs
4972                            0,           // size
4973                            false,       // is_tlab
4974                            level        // max_level
4975                           );
4976       }
4977     }
4978     FreelistLocker x(this);
4979     MutexLockerEx y(bitMapLock(),
4980                     Mutex::_no_safepoint_check_flag);
4981     assert(!init_mark_was_synchronous, "but that's impossible!");
4982     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4983   } else {
4984     // already have all the locks
4985     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4986                              init_mark_was_synchronous);
4987   }
4988   verify_work_stacks_empty();
4989   verify_overflow_empty();
4990   SpecializationStats::print();
4991 }
4992 
4993 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4994   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4995 
4996   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
4997 
4998   assert(haveFreelistLocks(), "must have free list locks");
4999   assert_lock_strong(bitMapLock());
5000 
5001   if (UseAdaptiveSizePolicy) {
5002     size_policy()->checkpoint_roots_final_begin();
5003   }
5004 
5005   ResourceMark rm;
5006   HandleMark   hm;
5007 
5008   GenCollectedHeap* gch = GenCollectedHeap::heap();
5009 
5010   if (should_unload_classes()) {
5011     CodeCache::gc_prologue();
5012   }
5013   assert(haveFreelistLocks(), "must have free list locks");
5014   assert_lock_strong(bitMapLock());
5015 
5016   if (!init_mark_was_synchronous) {


5027     // we cannot rely on TLAB's having been filled and must do
5028     // so here just in case a scavenge did not happen.
5029     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
5030     // Update the saved marks which may affect the root scans.
5031     gch->save_marks();
5032 
5033     {
5034       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5035 
5036       // Note on the role of the mod union table:
5037       // Since the marker in "markFromRoots" marks concurrently with
5038       // mutators, it is possible for some reachable objects not to have been
5039       // scanned. For instance, the only reference to an object A was
5040       // placed in object B after the marker scanned B. Unless B is rescanned,
5041       // A would be collected. Such updates to references in marked objects
5042       // are detected via the mod union table which is the set of all cards
5043       // dirtied since the first checkpoint in this GC cycle and prior to
5044       // the most recent young generation GC, minus those cleaned up by the
5045       // concurrent precleaning.
5046       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5047         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
5048         do_remark_parallel();
5049       } else {
5050         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5051                     _gc_timer_cm);
5052         do_remark_non_parallel();
5053       }
5054     }
5055   } else {
5056     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5057     // The initial mark was stop-world, so there's no rescanning to
5058     // do; go straight on to the next step below.
5059   }
5060   verify_work_stacks_empty();
5061   verify_overflow_empty();
5062 
5063   {
5064     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
5065     refProcessingWork(asynch, clear_all_soft_refs);
5066   }
5067   verify_work_stacks_empty();
5068   verify_overflow_empty();
5069 
5070   if (should_unload_classes()) {
5071     CodeCache::gc_epilogue();
5072   }
5073   JvmtiExport::gc_epilogue();
5074 
5075   // If we encountered any (marking stack / work queue) overflow
5076   // events during the current CMS cycle, take appropriate
5077   // remedial measures, where possible, so as to try and avoid
5078   // recurrence of that condition.
5079   assert(_markStack.isEmpty(), "No grey objects");
5080   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5081                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
5082   if (ser_ovflw > 0) {
5083     if (PrintCMSStatistics != 0) {
5084       gclog_or_tty->print_cr("Marking stack overflow (benign) "


5105   if (PrintCMSStatistics != 0) {
5106      if (_markStack._hit_limit > 0) {
5107        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5108                               _markStack._hit_limit);
5109      }
5110      if (_markStack._failed_double > 0) {
5111        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5112                               " current capacity "SIZE_FORMAT,
5113                               _markStack._failed_double,
5114                               _markStack.capacity());
5115      }
5116   }
5117   _markStack._hit_limit = 0;
5118   _markStack._failed_double = 0;
5119 
5120   if ((VerifyAfterGC || VerifyDuringGC) &&
5121       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5122     verify_after_remark();
5123   }
5124 
5125   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5126 
5127   // Change under the freelistLocks.
5128   _collectorState = Sweeping;
5129   // Call isAllClear() under bitMapLock
5130   assert(_modUnionTable.isAllClear(),
5131       "Should be clear by end of the final marking");
5132   assert(_ct->klass_rem_set()->mod_union_is_clear(),
5133       "Should be clear by end of the final marking");
5134   if (UseAdaptiveSizePolicy) {
5135     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5136   }
5137 }
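
The mod union table described in checkpointRootsFinalWork() above can be pictured as a bitmap over card indices: dirty cards are OR-ed in at each checkpoint, precleaning clears the cards it has already rescanned, and the final remark visits only the bits still set. The sketch below models that bookkeeping with std::vector<bool>; ToyModUnionTable and its method names are illustrative only, not the HotSpot API.

#include <cstddef>
#include <iostream>
#include <vector>

// Much simplified model of the mod union table / card table interaction.
struct ToyModUnionTable {
  std::vector<bool> bits;
  explicit ToyModUnionTable(size_t cards) : bits(cards, false) {}

  // Transfer (OR in) the currently dirty cards, as the dirty_card_iterate /
  // ModUnionClosure step does, and reset the card table entries.
  void absorb(std::vector<bool>& card_table) {
    for (size_t i = 0; i < bits.size(); i++) {
      if (card_table[i]) { bits[i] = true; card_table[i] = false; }
    }
  }

  void preclean(size_t card) { bits[card] = false; }   // already rescanned

  // Visit and clear the remaining dirty cards, as dirty_range_iterate_clear does.
  template <typename Closure>
  void dirty_range_iterate_clear(Closure cl) {
    for (size_t i = 0; i < bits.size(); i++) {
      if (bits[i]) { cl(i); bits[i] = false; }
    }
  }
};

int main() {
  const size_t num_cards = 8;
  std::vector<bool> card_table(num_cards, false);
  ToyModUnionTable mut(num_cards);

  card_table[2] = card_table[5] = true;   // mutators dirty some cards
  mut.absorb(card_table);                 // checkpoint: remember them
  mut.preclean(5);                        // concurrent precleaning handled card 5

  // Final remark: rescan only the cards that are still recorded.
  mut.dirty_range_iterate_clear([](size_t card) {
    std::cout << "rescanning card " << card << "\n";   // prints card 2 only
  });
  return 0;
}
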
5138 
5139 // Parallel remark task
5140 class CMSParRemarkTask: public AbstractGangTask {
5141   CMSCollector* _collector;
5142   int           _n_workers;
5143   CompactibleFreeListSpace* _cms_space;
5144 
5145   // The per-thread work queues, available here for stealing.
5146   OopTaskQueueSet*       _task_queues;


5760   // as a result of work_q overflow
5761   restore_preserved_marks_if_any();
5762 }
5763 
5764 // Non-parallel version of remark
5765 void CMSCollector::do_remark_non_parallel() {
5766   ResourceMark rm;
5767   HandleMark   hm;
5768   GenCollectedHeap* gch = GenCollectedHeap::heap();
5769   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5770 
5771   MarkRefsIntoAndScanClosure
5772     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5773              &_markStack, this,
5774              false /* should_yield */, false /* not precleaning */);
5775   MarkFromDirtyCardsClosure
5776     markFromDirtyCardsClosure(this, _span,
5777                               NULL,  // space is set further below
5778                               &_markBitMap, &_markStack, &mrias_cl);
5779   {
5780     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5781     // Iterate over the dirty cards, setting the corresponding bits in the
5782     // mod union table.
5783     {
5784       ModUnionClosure modUnionClosure(&_modUnionTable);
5785       _ct->ct_bs()->dirty_card_iterate(
5786                       _cmsGen->used_region(),
5787                       &modUnionClosure);
5788     }
5789     // Having transferred these marks into the modUnionTable, we just need
5790     // to rescan the marked objects on the dirty cards in the modUnionTable.
5791     // The initial marking may have been done during an asynchronous
5792     // collection so there may be dirty bits in the mod-union table.
5793     const int alignment =
5794       CardTableModRefBS::card_size * BitsPerWord;
5795     {
5796       // ... First handle dirty cards in CMS gen
5797       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5798       MemRegion ur = _cmsGen->used_region();
5799       HeapWord* lb = ur.start();
5800       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5801       MemRegion cms_span(lb, ub);
5802       _modUnionTable.dirty_range_iterate_clear(cms_span,
5803                                                &markFromDirtyCardsClosure);
5804       verify_work_stacks_empty();
5805       if (PrintCMSStatistics != 0) {
5806         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5807           markFromDirtyCardsClosure.num_dirty_cards());
5808       }
5809     }
5810   }
5811   if (VerifyDuringGC &&
5812       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5813     HandleMark hm;  // Discard invalid handles created during verification
5814     Universe::verify();
5815   }
5816   {
5817     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5818 
5819     verify_work_stacks_empty();
5820 
5821     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5822     GenCollectedHeap::StrongRootsScope srs(gch);
5823     gch->gen_process_strong_roots(_cmsGen->level(),
5824                                   true,  // younger gens as roots
5825                                   false, // use the local StrongRootsScope
5826                                   false, // not scavenging
5827                                   SharedHeap::ScanningOption(roots_scanning_options()),
5828                                   &mrias_cl,
5829                                   true,   // walk code active on stacks
5830                                   NULL,
5831                                   NULL);  // The dirty klasses will be handled below
5832 
5833     assert(should_unload_classes()
5834            || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5835            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5836   }
5837 
5838   {
5839     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5840 
5841     verify_work_stacks_empty();
5842 
5843     // Scan all class loader data objects that might have been introduced
5844     // during concurrent marking.
5845     ResourceMark rm;
5846     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5847     for (int i = 0; i < array->length(); i++) {
5848       mrias_cl.do_class_loader_data(array->at(i));
5849     }
5850 
5851     // We don't need to keep track of new CLDs anymore.
5852     ClassLoaderDataGraph::remember_new_clds(false);
5853 
5854     verify_work_stacks_empty();
5855   }
5856 
5857   {
5858     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
5859 
5860     verify_work_stacks_empty();
5861 
5862     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5863     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5864 
5865     verify_work_stacks_empty();
5866   }
5867 
5868   // We might have added oops to ClassLoaderData::_handles during the
5869   // concurrent marking phase. These oops point to newly allocated objects
5870   // that are guaranteed to be kept alive, either by the direct allocation
5871   // code or when the young collector processes the strong roots. Hence,
5872   // we don't have to revisit the _handles block during the remark phase.
5873 
5874   verify_work_stacks_empty();
5875   // Restore evacuated mark words, if any, used for overflow list links
5876   if (!CMSOverflowEarlyRestoration) {
5877     restore_preserved_marks_if_any();
5878   }


6040 }
6041 
6042 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
6043 
6044   ResourceMark rm;
6045   HandleMark   hm;
6046 
6047   ReferenceProcessor* rp = ref_processor();
6048   assert(rp->span().equals(_span), "Spans should be equal");
6049   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
6050   // Process weak references.
6051   rp->setup_policy(clear_all_soft_refs);
6052   verify_work_stacks_empty();
6053 
6054   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
6055                                           &_markStack, false /* !preclean */);
6056   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
6057                                 _span, &_markBitMap, &_markStack,
6058                                 &cmsKeepAliveClosure, false /* !preclean */);
6059   {
6060     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
6061 
6062     ReferenceProcessorStats stats;
6063     if (rp->processing_is_mt()) {
6064       // Set the degree of MT here.  If the discovery is done MT, there
6065       // may have been a different number of threads doing the discovery
6066       // and a different number of discovered lists may have Ref objects.
6067       // That is OK as long as the Reference lists are balanced (see
6068       // balance_all_queues() and balance_queues()).
6069       GenCollectedHeap* gch = GenCollectedHeap::heap();
6070       int active_workers = ParallelGCThreads;
6071       FlexibleWorkGang* workers = gch->workers();
6072       if (workers != NULL) {
6073         active_workers = workers->active_workers();
6074         // The expectation is that active_workers will have already
6075         // been set to a reasonable value.  If it has not been set,
6076         // investigate.
6077         assert(active_workers > 0, "Should have been set during scavenge");
6078       }
6079       rp->set_active_mt_degree(active_workers);
6080       CMSRefProcTaskExecutor task_executor(*this);
6081       stats = rp->process_discovered_references(&_is_alive_closure,
6082                                         &cmsKeepAliveClosure,
6083                                         &cmsDrainMarkingStackClosure,
6084                                         &task_executor,
6085                                         _gc_timer_cm);
6086     } else {
6087       stats = rp->process_discovered_references(&_is_alive_closure,
6088                                         &cmsKeepAliveClosure,
6089                                         &cmsDrainMarkingStackClosure,
6090                                         NULL,
6091                                         _gc_timer_cm);
6092     }
6093     _gc_tracer_cm->report_gc_reference_stats(stats);
6094 
6095   }
6096 
6097   // This is the point where the entire marking should have completed.
6098   verify_work_stacks_empty();
6099 
6100   if (should_unload_classes()) {
6101     {
6102       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
6103 
6104       // Unload classes and purge the SystemDictionary.
6105       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6106 
6107       // Unload nmethods.
6108       CodeCache::do_unloading(&_is_alive_closure, purged_class);
6109 
6110       // Prune dead klasses from subklass/sibling/implementor lists.
6111       Klass::clean_weak_klass_links(&_is_alive_closure);
6112     }
6113 
6114     {
6115       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
6116       // Clean up unreferenced symbols in symbol table.
6117       SymbolTable::unlink();
6118     }
6119   }
6120 
6121   // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
6122   // Need to check if we really scanned the StringTable.
6123   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
6124     GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
6125     // Delete entries for dead interned strings.
6126     StringTable::unlink(&_is_alive_closure);
6127   }
6128 
6129   // Restore any preserved marks as a result of mark stack or
6130   // work queue overflow
6131   restore_preserved_marks_if_any();  // done single-threaded for now
6132 
6133   rp->set_enqueuing_is_done(true);
6134   if (rp->processing_is_mt()) {
6135     rp->balance_all_queues();
6136     CMSRefProcTaskExecutor task_executor(*this);
6137     rp->enqueue_discovered_references(&task_executor);
6138   } else {
6139     rp->enqueue_discovered_references(NULL);
6140   }
6141   rp->verify_no_references_recorded();
6142   assert(!rp->discovery_enabled(), "should have been disabled");
6143 }
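
refProcessingWork() above chooses between multi-threaded and single-threaded reference processing by passing either a task executor or NULL to process_discovered_references(). A compact stand-alone sketch of that dispatch shape; RefList, ToyTaskExecutor and the free function below are hypothetical, and output from the worker threads may interleave.

#include <functional>
#include <iostream>
#include <thread>
#include <vector>

using RefList = std::vector<int>;              // toy "discovered references" list

// Fans a task out over the discovered lists, one worker per list.
struct ToyTaskExecutor {
  void execute(const std::function<void(RefList&)>& task,
               std::vector<RefList>& lists) {
    std::vector<std::thread> workers;
    for (RefList& list : lists) {
      workers.emplace_back(task, std::ref(list));
    }
    for (std::thread& t : workers) t.join();
  }
};

// With an executor the work is done in parallel over the lists; without one,
// the calling (VM) thread processes every list itself.
void process_discovered_references(std::vector<RefList>& lists,
                                   ToyTaskExecutor* executor) {
  auto task = [](RefList& list) {
    for (int ref : list) {
      std::cout << "processing reference " << ref << "\n";
    }
    list.clear();
  };
  if (executor != nullptr) {
    executor->execute(task, lists);            // MT processing
  } else {
    for (RefList& list : lists) task(list);    // single-threaded processing
  }
}

int main() {
  std::vector<RefList> discovered_mt = {{1, 2}, {3}, {4, 5, 6}};
  std::vector<RefList> discovered_st = {{7}, {8, 9}};
  ToyTaskExecutor executor;
  process_discovered_references(discovered_mt, &executor);   // MT variant
  process_discovered_references(discovered_st, nullptr);     // single-threaded
  return 0;
}
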
6144 


6449     // and count.
6450     sp->reset_gc_overhead_limit_count();
6451     _collectorState = Idling;
6452   } else {
6453     // already have the lock
6454     assert(_collectorState == Resetting, "just checking");
6455     assert_lock_strong(bitMapLock());
6456     _markBitMap.clear_all();
6457     _collectorState = Idling;
6458   }
6459 
6460   // Stop incremental mode after a cycle completes, so that any future cycles
6461   // are triggered by allocation.
6462   stop_icms();
6463 
6464   NOT_PRODUCT(
6465     if (RotateCMSCollectionTypes) {
6466       _cmsGen->rotate_debug_collection_type();
6467     }
6468   )
6469 
6470   register_gc_end();
6471 }
6472 
6473 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6474   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6475   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6476   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
6477   TraceCollectorStats tcs(counters());
6478 
6479   switch (op) {
6480     case CMS_op_checkpointRootsInitial: {
6481       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6482       checkpointRootsInitial(true);       // asynch
6483       if (PrintGC) {
6484         _cmsGen->printOccupancy("initial-mark");
6485       }
6486       break;
6487     }
6488     case CMS_op_checkpointRootsFinal: {
6489       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6490       checkpointRootsFinal(true,    // asynch
6491                            false,   // !clear_all_soft_refs
6492                            false);  // !init_mark_was_synchronous
6493       if (PrintGC) {
6494         _cmsGen->printOccupancy("remark");
6495       }
6496       break;