src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

Print this page
rev 4482 : 8013934: Garbage collection event for CMS has wrong cause for System.gc()


  46 #include "memory/collectorPolicy.hpp"
  47 #include "memory/gcLocker.inline.hpp"
  48 #include "memory/genCollectedHeap.hpp"
  49 #include "memory/genMarkSweep.hpp"
  50 #include "memory/genOopClosures.inline.hpp"
  51 #include "memory/iterator.hpp"
  52 #include "memory/referencePolicy.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "prims/jvmtiExport.hpp"
  56 #include "runtime/globals_extension.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/vmThread.hpp"
  60 #include "services/memoryService.hpp"
  61 #include "services/runtimeService.hpp"
  62 
  63 // statics
     // NOTE(review): appears to be the singleton collector instance shared by
     // the CMS generation(s) -- confirm against the class declaration.
  64 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
     // Set under CGC_lock by request_full_gc(); cleared ("acked") by the CMS
     // thread when a new background collection cycle starts.
  65 bool          CMSCollector::_full_gc_requested          = false;

  66 
  67 //////////////////////////////////////////////////////////////////
  68 // In support of CMS/VM thread synchronization
  69 //////////////////////////////////////////////////////////////////
  70 // We split use of the CGC_lock into 2 "levels".
  71 // The low-level locking is of the usual CGC_lock monitor. We introduce
  72 // a higher level "token" (hereafter "CMS token") built on top of the
  73 // low level monitor (hereafter "CGC lock").
  74 // The token-passing protocol gives priority to the VM thread. The
  75 // CMS-lock doesn't provide any fairness guarantees, but clients
  76 // should ensure that it is only held for very short, bounded
  77 // durations.
  78 //
  79 // When either of the CMS thread or the VM thread is involved in
  80 // collection operations during which it does not want the other
  81 // thread to interfere, it obtains the CMS token.
  82 //
  83 // If either thread tries to get the token while the other has
  84 // it, that thread waits. However, if the VM thread and CMS thread
  85 // both want the token, then the VM thread gets priority while the


1666 
1667   // The following "if" branch is present for defensive reasons.
1668   // In the current uses of this interface, it can be replaced with:
1669   // assert(!GC_locker::is_active(), "Can't be called otherwise");
1670   // But I am not placing that assert here to allow future
1671   // generality in invoking this interface.
1672   if (GC_locker::is_active()) {
1673     // A consistency test for GC_locker
1674     assert(GC_locker::needs_gc(), "Should have been set already");
1675     // Skip this foreground collection, instead
1676     // expanding the heap if necessary.
1677     // Need the free list locks for the call to free() in compute_new_size()
1678     compute_new_size();
1679     return;
1680   }
1681   acquire_control_and_collect(full, clear_all_soft_refs);
1682   _full_gcs_since_conc_gc++;
1683 
1684 }
1685 
// Request that the CMS thread perform a full collection cycle.
// full_gc_count is the caller's sample of total_full_collections(); the
// request is only posted if no full collection has started since that
// sample was taken (otherwise the in-progress/new cycle already covers it).
1686 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1687   GenCollectedHeap* gch = GenCollectedHeap::heap();
1688   unsigned int gc_count = gch->total_full_collections();
     // Count unchanged: no full collection started since the caller sampled.
1689   if (gc_count == full_gc_count) {
     // Post the request under CGC_lock; the background collector clears
     // ("acks") _full_gc_requested when it starts a new cycle.
1690     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1691     _full_gc_requested = true;

1692     CGC_lock->notify();   // nudge CMS thread
1693   } else {
     // A full collection already started after the sample; the count can
     // only move forward, never backward.
1694     assert(gc_count > full_gc_count, "Error: causal loop");
1695   }
1696 }
1697 
// Returns true if the current GC's cause came from outside the VM's own
// collection heuristics: an explicit user request (e.g. System.gc()) or a
// serviceability request. Used to distinguish "concurrent mode interrupted"
// from "concurrent mode failure" in report_concurrent_mode_interruption().
1698 bool CMSCollector::is_external_interruption() {
1699   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1700   return GCCause::is_user_requested_gc(cause) ||
1701          GCCause::is_serviceability_requested_gc(cause);
1702 }
1703 
1704 void CMSCollector::report_concurrent_mode_interruption() {
1705   if (is_external_interruption()) {
1706     if (PrintGCDetails) {
1707       gclog_or_tty->print(" (concurrent mode interrupted)");
1708     }
1709   } else {
1710     if (PrintGCDetails) {
1711       gclog_or_tty->print(" (concurrent mode failure)");


2106       "collector with count %d",
2107       _full_gcs_since_conc_gc);
2108   }
2109   switch (_collectorState) {
2110     case Idling:
2111       if (first_state == Idling || should_start_over) {
2112         // The background GC was not active, or should
2113         // be restarted from scratch;  start the cycle.
2114         _collectorState = InitialMarking;
2115       }
2116       // If first_state was not Idling, then a background GC
2117       // was in progress and has now finished.  No need to do it
2118       // again.  Leave the state as Idling.
2119       break;
2120     case Precleaning:
2121       // In the foreground case don't do the precleaning since
2122       // it is not done concurrently and there is extra work
2123       // required.
2124       _collectorState = FinalMarking;
2125   }
2126   collect_in_foreground(clear_all_soft_refs);
2127 
2128   // For a mark-sweep, compute_new_size() will be called
2129   // in the heap's do_collection() method.
2130 }
2131 
2132 
2133 void CMSCollector::getFreelistLocks() const {
2134   // Get locks for all free lists in all generations that this
2135   // collector is responsible for
     // Pairs with releaseFreelistLocks().
     // NOTE(review): taken without a safepoint check -- presumably callers
     // run in VM/CMS-thread contexts where a safepoint check could
     // deadlock; confirm against call sites.
2136   _cmsGen->freelistLock()->lock_without_safepoint_check();
2137   _permGen->freelistLock()->lock_without_safepoint_check();
2138 }
2139 
2140 void CMSCollector::releaseFreelistLocks() const {
2141   // Release locks for all free lists in all generations that this
2142   // collector is responsible for
     // Counterpart of getFreelistLocks(): unlocks the CMS gen and perm gen
     // free-list locks acquired there.
2143   _cmsGen->freelistLock()->unlock();
2144   _permGen->freelistLock()->unlock();
2145 }
2146 


2169     if (_c->_foregroundGCIsActive) {
2170       CGC_lock->notify();
2171     }
2172     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2173            "Possible deadlock");
2174   }
2175 
     // On scope exit, re-assert (under CGC_lock, without a safepoint check)
     // that the foreground collector should wait again. The flag must be
     // clear on entry -- the usage protocol requires the foreground GC to
     // have finished its turn before this helper is destroyed.
2176   ~ReleaseForegroundGC() {
2177     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2178     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2179     _c->_foregroundGCShouldWait = true;
2180   }
2181 };
2182 
2183 // There are separate collect_in_background and collect_in_foreground because of
2184 // the different locking requirements of the background collector and the
2185 // foreground collector.  There was originally an attempt to share
2186 // one "collect" method between the background collector and the foreground
2187 // collector but the if-then-else required made it cleaner to have
2188 // separate methods.
2189 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2190   assert(Thread::current()->is_ConcurrentGC_thread(),
2191     "A CMS asynchronous collection is only allowed on a CMS thread.");
2192 
2193   GenCollectedHeap* gch = GenCollectedHeap::heap();
2194   {
2195     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2196     MutexLockerEx hl(Heap_lock, safepoint_check);
2197     FreelistLocker fll(this);
2198     MutexLockerEx x(CGC_lock, safepoint_check);
2199     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2200       // The foreground collector is active or we're
2201       // not using asynchronous collections.  Skip this
2202       // background collection.
2203       assert(!_foregroundGCShouldWait, "Should be clear");
2204       return;
2205     } else {
2206       assert(_collectorState == Idling, "Should be idling before start.");
2207       _collectorState = InitialMarking;
2208       register_gc_start(GCCause::_cms_concurrent_mark);
2209       // Reset the expansion cause, now that we are about to begin
2210       // a new cycle.
2211       clear_expansion_cause();
2212     }
2213     // Decide if we want to enable class unloading as part of the
2214     // ensuing concurrent GC cycle.
2215     update_should_unload_classes();
2216     _full_gc_requested = false;           // acks all outstanding full gc requests

2217     // Signal that we are about to start a collection
2218     gch->increment_total_full_collections();  // ... starting a collection cycle
2219     _collection_count_start = gch->total_full_collections();
2220   }
2221 
2222   // Used for PrintGC
2223   size_t prev_used;
2224   if (PrintGC && Verbose) {
2225     prev_used = _cmsGen->used(); // XXXPERM
2226   }
2227 
2228   // The change of the collection state is normally done at this level;
2229   // the exceptions are phases that are executed while the world is
2230   // stopped.  For those phases the change of state is done while the
2231   // world is stopped.  For baton passing purposes this allows the
2232   // background collector to finish the phase and change state atomically.
2233   // The foreground collector cannot wait on a phase that is done
2234   // while the world is stopped because the foreground collector already
2235   // has the world stopped and would deadlock.
2236   while (_collectorState != Idling) {


2443 void CMSCollector::register_gc_end() {
     // Close out a CMS trace only if a matching start was registered
     // (_cms_start_registered): report the after-GC heap summary, stop the
     // CMS timer, and send the end event to the tracer.
2444   if (_cms_start_registered) {
2445     report_heap_summary(GCWhen::AfterGC);
2446
2447     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2448     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
     // Allow a subsequent cycle to register a fresh start.
2449     _cms_start_registered = false;
2450   }
2451 }
2452 
2453 void CMSCollector::save_heap_summary() {
     // Snapshot the current heap and perm gen summaries; they are emitted
     // later via report_heap_summary().
2454   GenCollectedHeap* gch = GenCollectedHeap::heap();
2455   _last_heap_summary = gch->create_heap_summary();
2456   _last_perm_gen_summary = gch->create_perm_gen_summary();
2457 }
2458 
     // Forward the most recently saved heap/perm gen summaries (see
     // save_heap_summary()) to the CMS tracer, tagged with the supplied
     // GCWhen::Type (e.g. AfterGC).
2459 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2460   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_perm_gen_summary);
2461 }
2462 
2463 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2464   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2465          "Foreground collector should be waiting, not executing");
2466   assert(Thread::current()->is_VM_thread(), "A foreground collection"
2467     "may only be done by the VM Thread with the world stopped");
2468   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2469          "VM thread should have CMS token");
2470 
2471   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2472     true, NULL);)
2473   if (UseAdaptiveSizePolicy) {
2474     size_policy()->ms_collection_begin();
2475   }
2476   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2477 
2478   HandleMark hm;  // Discard invalid handles created during verification
2479 
2480   if (VerifyBeforeGC &&
2481       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2482     Universe::verify();
2483   }
2484 
2485   // Snapshot the soft reference policy to be used in this collection cycle.
2486   ref_processor()->setup_policy(clear_all_soft_refs);
2487 
2488   bool init_mark_was_synchronous = false; // until proven otherwise
2489   while (_collectorState != Idling) {
2490     if (TraceCMSState) {
2491       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2492         Thread::current(), _collectorState);
2493     }
2494     switch (_collectorState) {
2495       case InitialMarking:
2496         register_foreground_gc_start(GenCollectedHeap::heap()->gc_cause());
2497         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2498         checkpointRootsInitial(false);
2499         assert(_collectorState == Marking, "Collector state should have changed"
2500           " within checkpointRootsInitial()");
2501         break;
2502       case Marking:
2503         // initial marking in checkpointRootsInitialWork has been completed
2504         if (VerifyDuringGC &&
2505             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2506           gclog_or_tty->print("Verify before initial mark: ");
2507           Universe::verify();
2508         }
2509         {
2510           bool res = markFromRoots(false);
2511           assert(res && _collectorState == FinalMarking, "Collector state should "
2512             "have changed");
2513           break;
2514         }
2515       case FinalMarking:
2516         if (VerifyDuringGC &&




  46 #include "memory/collectorPolicy.hpp"
  47 #include "memory/gcLocker.inline.hpp"
  48 #include "memory/genCollectedHeap.hpp"
  49 #include "memory/genMarkSweep.hpp"
  50 #include "memory/genOopClosures.inline.hpp"
  51 #include "memory/iterator.hpp"
  52 #include "memory/referencePolicy.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "prims/jvmtiExport.hpp"
  56 #include "runtime/globals_extension.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/vmThread.hpp"
  60 #include "services/memoryService.hpp"
  61 #include "services/runtimeService.hpp"
  62 
  63 // statics
     // NOTE(review): appears to be the singleton collector instance shared by
     // the CMS generation(s) -- confirm against the class declaration.
  64 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
     // _full_gc_requested is set under CGC_lock by request_full_gc() and
     // cleared ("acked") when a new background cycle starts.
  65 bool CMSCollector::_full_gc_requested = false;
     // Cause recorded alongside the request; reset to _no_gc at the same
     // ack point so each cycle carries only its own cause.
  66 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  67 
  68 //////////////////////////////////////////////////////////////////
  69 // In support of CMS/VM thread synchronization
  70 //////////////////////////////////////////////////////////////////
  71 // We split use of the CGC_lock into 2 "levels".
  72 // The low-level locking is of the usual CGC_lock monitor. We introduce
  73 // a higher level "token" (hereafter "CMS token") built on top of the
  74 // low level monitor (hereafter "CGC lock").
  75 // The token-passing protocol gives priority to the VM thread. The
  76 // CMS-lock doesn't provide any fairness guarantees, but clients
  77 // should ensure that it is only held for very short, bounded
  78 // durations.
  79 //
  80 // When either of the CMS thread or the VM thread is involved in
  81 // collection operations during which it does not want the other
  82 // thread to interfere, it obtains the CMS token.
  83 //
  84 // If either thread tries to get the token while the other has
  85 // it, that thread waits. However, if the VM thread and CMS thread
  86 // both want the token, then the VM thread gets priority while the


1667 
1668   // The following "if" branch is present for defensive reasons.
1669   // In the current uses of this interface, it can be replaced with:
1670   // assert(!GC_locker::is_active(), "Can't be called otherwise");
1671   // But I am not placing that assert here to allow future
1672   // generality in invoking this interface.
1673   if (GC_locker::is_active()) {
1674     // A consistency test for GC_locker
1675     assert(GC_locker::needs_gc(), "Should have been set already");
1676     // Skip this foreground collection, instead
1677     // expanding the heap if necessary.
1678     // Need the free list locks for the call to free() in compute_new_size()
1679     compute_new_size();
1680     return;
1681   }
1682   acquire_control_and_collect(full, clear_all_soft_refs);
1683   _full_gcs_since_conc_gc++;
1684 
1685 }
1686 
// Request that the CMS thread perform a full collection cycle, recording
// the GCCause that prompted it. full_gc_count is the caller's sample of
// total_full_collections(); the request is only posted if no full
// collection has started since that sample was taken.
1687 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1688   GenCollectedHeap* gch = GenCollectedHeap::heap();
1689   unsigned int gc_count = gch->total_full_collections();
     // Count unchanged: no full collection started since the caller sampled.
1690   if (gc_count == full_gc_count) {
1691     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1692     _full_gc_requested = true;
     // Record why the collection was requested so the resulting GC event
     // carries the right cause (e.g. System.gc()) -- see the revision note
     // for 8013934 at the top of this page.
1693     _full_gc_cause = cause;
1694     CGC_lock->notify();   // nudge CMS thread
1695   } else {
     // A full collection already started after the sample; the count can
     // only move forward, never backward.
1696     assert(gc_count > full_gc_count, "Error: causal loop");
1697   }
1698 }
1699 
// Returns true if the current GC's cause came from outside the VM's own
// collection heuristics: an explicit user request (e.g. System.gc()) or a
// serviceability request. Used to distinguish "concurrent mode interrupted"
// from "concurrent mode failure" in report_concurrent_mode_interruption().
1700 bool CMSCollector::is_external_interruption() {
1701   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1702   return GCCause::is_user_requested_gc(cause) ||
1703          GCCause::is_serviceability_requested_gc(cause);
1704 }
1705 
1706 void CMSCollector::report_concurrent_mode_interruption() {
1707   if (is_external_interruption()) {
1708     if (PrintGCDetails) {
1709       gclog_or_tty->print(" (concurrent mode interrupted)");
1710     }
1711   } else {
1712     if (PrintGCDetails) {
1713       gclog_or_tty->print(" (concurrent mode failure)");


2108       "collector with count %d",
2109       _full_gcs_since_conc_gc);
2110   }
2111   switch (_collectorState) {
2112     case Idling:
2113       if (first_state == Idling || should_start_over) {
2114         // The background GC was not active, or should
2115         // be restarted from scratch;  start the cycle.
2116         _collectorState = InitialMarking;
2117       }
2118       // If first_state was not Idling, then a background GC
2119       // was in progress and has now finished.  No need to do it
2120       // again.  Leave the state as Idling.
2121       break;
2122     case Precleaning:
2123       // In the foreground case don't do the precleaning since
2124       // it is not done concurrently and there is extra work
2125       // required.
2126       _collectorState = FinalMarking;
2127   }
2128   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2129 
2130   // For a mark-sweep, compute_new_size() will be called
2131   // in the heap's do_collection() method.
2132 }
2133 
2134 
2135 void CMSCollector::getFreelistLocks() const {
2136   // Get locks for all free lists in all generations that this
2137   // collector is responsible for
     // Pairs with releaseFreelistLocks().
     // NOTE(review): taken without a safepoint check -- presumably callers
     // run in VM/CMS-thread contexts where a safepoint check could
     // deadlock; confirm against call sites.
2138   _cmsGen->freelistLock()->lock_without_safepoint_check();
2139   _permGen->freelistLock()->lock_without_safepoint_check();
2140 }
2141 
2142 void CMSCollector::releaseFreelistLocks() const {
2143   // Release locks for all free lists in all generations that this
2144   // collector is responsible for
     // Counterpart of getFreelistLocks(): unlocks the CMS gen and perm gen
     // free-list locks acquired there.
2145   _cmsGen->freelistLock()->unlock();
2146   _permGen->freelistLock()->unlock();
2147 }
2148 


2171     if (_c->_foregroundGCIsActive) {
2172       CGC_lock->notify();
2173     }
2174     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2175            "Possible deadlock");
2176   }
2177 
     // On scope exit, re-assert (under CGC_lock, without a safepoint check)
     // that the foreground collector should wait again. The flag must be
     // clear on entry -- the usage protocol requires the foreground GC to
     // have finished its turn before this helper is destroyed.
2178   ~ReleaseForegroundGC() {
2179     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2180     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2181     _c->_foregroundGCShouldWait = true;
2182   }
2183 };
2184 
2185 // There are separate collect_in_background and collect_in_foreground because of
2186 // the different locking requirements of the background collector and the
2187 // foreground collector.  There was originally an attempt to share
2188 // one "collect" method between the background collector and the foreground
2189 // collector but the if-then-else required made it cleaner to have
2190 // separate methods.
2191 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2192   assert(Thread::current()->is_ConcurrentGC_thread(),
2193     "A CMS asynchronous collection is only allowed on a CMS thread.");
2194 
2195   GenCollectedHeap* gch = GenCollectedHeap::heap();
2196   {
2197     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2198     MutexLockerEx hl(Heap_lock, safepoint_check);
2199     FreelistLocker fll(this);
2200     MutexLockerEx x(CGC_lock, safepoint_check);
2201     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2202       // The foreground collector is active or we're
2203       // not using asynchronous collections.  Skip this
2204       // background collection.
2205       assert(!_foregroundGCShouldWait, "Should be clear");
2206       return;
2207     } else {
2208       assert(_collectorState == Idling, "Should be idling before start.");
2209       _collectorState = InitialMarking;
2210       register_gc_start(cause);
2211       // Reset the expansion cause, now that we are about to begin
2212       // a new cycle.
2213       clear_expansion_cause();
2214     }
2215     // Decide if we want to enable class unloading as part of the
2216     // ensuing concurrent GC cycle.
2217     update_should_unload_classes();
2218     _full_gc_requested = false;           // acks all outstanding full gc requests
2219     _full_gc_cause = GCCause::_no_gc;
2220     // Signal that we are about to start a collection
2221     gch->increment_total_full_collections();  // ... starting a collection cycle
2222     _collection_count_start = gch->total_full_collections();
2223   }
2224 
2225   // Used for PrintGC
2226   size_t prev_used;
2227   if (PrintGC && Verbose) {
2228     prev_used = _cmsGen->used(); // XXXPERM
2229   }
2230 
2231   // The change of the collection state is normally done at this level;
2232   // the exceptions are phases that are executed while the world is
2233   // stopped.  For those phases the change of state is done while the
2234   // world is stopped.  For baton passing purposes this allows the
2235   // background collector to finish the phase and change state atomically.
2236   // The foreground collector cannot wait on a phase that is done
2237   // while the world is stopped because the foreground collector already
2238   // has the world stopped and would deadlock.
2239   while (_collectorState != Idling) {


2446 void CMSCollector::register_gc_end() {
     // Close out a CMS trace only if a matching start was registered
     // (_cms_start_registered): report the after-GC heap summary, stop the
     // CMS timer, and send the end event to the tracer.
2447   if (_cms_start_registered) {
2448     report_heap_summary(GCWhen::AfterGC);
2449
2450     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2451     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
     // Allow a subsequent cycle to register a fresh start.
2452     _cms_start_registered = false;
2453   }
2454 }
2455 
2456 void CMSCollector::save_heap_summary() {
     // Snapshot the current heap and perm gen summaries; they are emitted
     // later via report_heap_summary().
2457   GenCollectedHeap* gch = GenCollectedHeap::heap();
2458   _last_heap_summary = gch->create_heap_summary();
2459   _last_perm_gen_summary = gch->create_perm_gen_summary();
2460 }
2461 
     // Forward the most recently saved heap/perm gen summaries (see
     // save_heap_summary()) to the CMS tracer, tagged with the supplied
     // GCWhen::Type (e.g. AfterGC).
2462 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2463   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_perm_gen_summary);
2464 }
2465 
2466 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2467   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2468          "Foreground collector should be waiting, not executing");
2469   assert(Thread::current()->is_VM_thread(), "A foreground collection"
2470     "may only be done by the VM Thread with the world stopped");
2471   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2472          "VM thread should have CMS token");
2473 
2474   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2475     true, NULL);)
2476   if (UseAdaptiveSizePolicy) {
2477     size_policy()->ms_collection_begin();
2478   }
2479   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2480 
2481   HandleMark hm;  // Discard invalid handles created during verification
2482 
2483   if (VerifyBeforeGC &&
2484       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2485     Universe::verify();
2486   }
2487 
2488   // Snapshot the soft reference policy to be used in this collection cycle.
2489   ref_processor()->setup_policy(clear_all_soft_refs);
2490 
2491   bool init_mark_was_synchronous = false; // until proven otherwise
2492   while (_collectorState != Idling) {
2493     if (TraceCMSState) {
2494       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2495         Thread::current(), _collectorState);
2496     }
2497     switch (_collectorState) {
2498       case InitialMarking:
2499         register_foreground_gc_start(cause);
2500         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2501         checkpointRootsInitial(false);
2502         assert(_collectorState == Marking, "Collector state should have changed"
2503           " within checkpointRootsInitial()");
2504         break;
2505       case Marking:
2506         // initial marking in checkpointRootsInitialWork has been completed
2507         if (VerifyDuringGC &&
2508             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2509           gclog_or_tty->print("Verify before initial mark: ");
2510           Universe::verify();
2511         }
2512         {
2513           bool res = markFromRoots(false);
2514           assert(res && _collectorState == FinalMarking, "Collector state should "
2515             "have changed");
2516           break;
2517         }
2518       case FinalMarking:
2519         if (VerifyDuringGC &&