src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

Code-review webrev: the old version of each hunk is shown first, followed by the patched version.
rev 4482 : 8013934: Garbage collection event for CMS has wrong cause for System.gc()


  46 #include "memory/collectorPolicy.hpp"
  47 #include "memory/gcLocker.inline.hpp"
  48 #include "memory/genCollectedHeap.hpp"
  49 #include "memory/genMarkSweep.hpp"
  50 #include "memory/genOopClosures.inline.hpp"
  51 #include "memory/iterator.hpp"
  52 #include "memory/referencePolicy.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "prims/jvmtiExport.hpp"
  56 #include "runtime/globals_extension.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/vmThread.hpp"
  60 #include "services/memoryService.hpp"
  61 #include "services/runtimeService.hpp"
  62 
  63 // statics
     // NOTE(review): initialized to NULL here; the actual collector is
     // installed during collector setup, which is outside this excerpt.
  64 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
     // Flag set (under CGC_lock, see request_full_gc) to ask the CMS
     // background thread to perform a full collection.
  65 bool          CMSCollector::_full_gc_requested          = false;

  66 
  67 //////////////////////////////////////////////////////////////////
  68 // In support of CMS/VM thread synchronization
  69 //////////////////////////////////////////////////////////////////
  70 // We split use of the CGC_lock into 2 "levels".
  71 // The low-level locking is of the usual CGC_lock monitor. We introduce
  72 // a higher level "token" (hereafter "CMS token") built on top of the
  73 // low level monitor (hereafter "CGC lock").
  74 // The token-passing protocol gives priority to the VM thread. The
  75 // CMS-lock doesn't provide any fairness guarantees, but clients
  76 // should ensure that it is only held for very short, bounded
  77 // durations.
  78 //
  79 // When either of the CMS thread or the VM thread is involved in
  80 // collection operations during which it does not want the other
  81 // thread to interfere, it obtains the CMS token.
  82 //
  83 // If either thread tries to get the token while the other has
  84 // it, that thread waits. However, if the VM thread and CMS thread
  85 // both want the token, then the VM thread gets priority while the


1666 
1667   // The following "if" branch is present for defensive reasons.
1668   // In the current uses of this interface, it can be replaced with:
1669   // assert(!GC_locker.is_active(), "Can't be called otherwise");
1670   // But I am not placing that assert here to allow future
1671   // generality in invoking this interface.
1672   if (GC_locker::is_active()) {
1673     // A consistency test for GC_locker
1674     assert(GC_locker::needs_gc(), "Should have been set already");
1675     // Skip this foreground collection, instead
1676     // expanding the heap if necessary.
1677     // Need the free list locks for the call to free() in compute_new_size()
1678     compute_new_size();
1679     return;
1680   }
1681   acquire_control_and_collect(full, clear_all_soft_refs);
1682   _full_gcs_since_conc_gc++;
1683 
1684 }
1685 
// Ask the CMS background thread to run a full collection.  The request is
// only posted if the caller's snapshot of the full-collection count
// (full_gc_count) still matches the heap's current count; otherwise a full
// collection has already begun since the snapshot was taken, and the
// request is treated as already satisfied.
1686 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1687   GenCollectedHeap* gch = GenCollectedHeap::heap();
1688   unsigned int gc_count = gch->total_full_collections();
1689   if (gc_count == full_gc_count) {
       // Publish the request under CGC_lock so the CMS thread observes a
       // consistent flag, then wake it up.
1690     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1691     _full_gc_requested = true;

1692     CGC_lock->notify();   // nudge CMS thread
1693   } else {
       // The collection count only moves forward; a current count smaller
       // than the caller's snapshot would be causally impossible.
1694     assert(gc_count > full_gc_count, "Error: causal loop");
1695   }
1696 }
1697 
// Returns true when the GC cause currently recorded on the heap originated
// outside the collector itself: either an explicit user request (e.g.
// System.gc()) or a serviceability request, as classified by the two
// GCCause predicates used below.
1698 bool CMSCollector::is_external_interruption() {
1699   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1700   return GCCause::is_user_requested_gc(cause) ||
1701          GCCause::is_serviceability_requested_gc(cause);
1702 }
1703 
1704 void CMSCollector::report_concurrent_mode_interruption() {
1705   if (is_external_interruption()) {
1706     if (PrintGCDetails) {
1707       gclog_or_tty->print(" (concurrent mode interrupted)");
1708     }
1709   } else {
1710     if (PrintGCDetails) {
1711       gclog_or_tty->print(" (concurrent mode failure)");


2188 // separate methods.
2189 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2190   assert(Thread::current()->is_ConcurrentGC_thread(),
2191     "A CMS asynchronous collection is only allowed on a CMS thread.");
2192 
2193   GenCollectedHeap* gch = GenCollectedHeap::heap();
2194   {
2195     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2196     MutexLockerEx hl(Heap_lock, safepoint_check);
2197     FreelistLocker fll(this);
2198     MutexLockerEx x(CGC_lock, safepoint_check);
2199     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2200       // The foreground collector is active or we're
2201       // not using asynchronous collections.  Skip this
2202       // background collection.
2203       assert(!_foregroundGCShouldWait, "Should be clear");
2204       return;
2205     } else {
2206       assert(_collectorState == Idling, "Should be idling before start.");
2207       _collectorState = InitialMarking;
2208       register_gc_start(GCCause::_cms_concurrent_mark);


2209       // Reset the expansion cause, now that we are about to begin
2210       // a new cycle.
2211       clear_expansion_cause();
2212     }
2213     // Decide if we want to enable class unloading as part of the
2214     // ensuing concurrent GC cycle.
2215     update_should_unload_classes();
2216     _full_gc_requested = false;           // acks all outstanding full gc requests

2217     // Signal that we are about to start a collection
2218     gch->increment_total_full_collections();  // ... starting a collection cycle
2219     _collection_count_start = gch->total_full_collections();
2220   }
2221 
2222   // Used for PrintGC
2223   size_t prev_used;
2224   if (PrintGC && Verbose) {
2225     prev_used = _cmsGen->used(); // XXXPERM
2226   }
2227 
2228   // The change of the collection state is normally done at this level;
2229   // the exceptions are phases that are executed while the world is
2230   // stopped.  For those phases the change of state is done while the
2231   // world is stopped.  For baton passing purposes this allows the
2232   // background collector to finish the phase and change state atomically.
2233   // The foreground collector cannot wait on a phase that is done
2234   // while the world is stopped because the foreground collector already
2235   // has the world stopped and would deadlock.
2236   while (_collectorState != Idling) {




  46 #include "memory/collectorPolicy.hpp"
  47 #include "memory/gcLocker.inline.hpp"
  48 #include "memory/genCollectedHeap.hpp"
  49 #include "memory/genMarkSweep.hpp"
  50 #include "memory/genOopClosures.inline.hpp"
  51 #include "memory/iterator.hpp"
  52 #include "memory/referencePolicy.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "prims/jvmtiExport.hpp"
  56 #include "runtime/globals_extension.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/vmThread.hpp"
  60 #include "services/memoryService.hpp"
  61 #include "services/runtimeService.hpp"
  62 
  63 // statics
     // NOTE(review): initialized to NULL here; the actual collector is
     // installed during collector setup, which is outside this excerpt.
  64 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
     // Flag set (under CGC_lock, see request_full_gc) to ask the CMS
     // background thread to perform a full collection.
  65 bool CMSCollector::_full_gc_requested = false;
     // Cause of the pending full-gc request (8013934).  Written together
     // with _full_gc_requested under CGC_lock in request_full_gc; consumed
     // by collect_in_background to report the real cause for explicit
     // requests, then reset to _no_gc when the request is acknowledged.
  66 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  67 
  68 //////////////////////////////////////////////////////////////////
  69 // In support of CMS/VM thread synchronization
  70 //////////////////////////////////////////////////////////////////
  71 // We split use of the CGC_lock into 2 "levels".
  72 // The low-level locking is of the usual CGC_lock monitor. We introduce
  73 // a higher level "token" (hereafter "CMS token") built on top of the
  74 // low level monitor (hereafter "CGC lock").
  75 // The token-passing protocol gives priority to the VM thread. The
  76 // CMS-lock doesn't provide any fairness guarantees, but clients
  77 // should ensure that it is only held for very short, bounded
  78 // durations.
  79 //
  80 // When either of the CMS thread or the VM thread is involved in
  81 // collection operations during which it does not want the other
  82 // thread to interfere, it obtains the CMS token.
  83 //
  84 // If either thread tries to get the token while the other has
  85 // it, that thread waits. However, if the VM thread and CMS thread
  86 // both want the token, then the VM thread gets priority while the


1667 
1668   // The following "if" branch is present for defensive reasons.
1669   // In the current uses of this interface, it can be replaced with:
1670   // assert(!GC_locker.is_active(), "Can't be called otherwise");
1671   // But I am not placing that assert here to allow future
1672   // generality in invoking this interface.
1673   if (GC_locker::is_active()) {
1674     // A consistency test for GC_locker
1675     assert(GC_locker::needs_gc(), "Should have been set already");
1676     // Skip this foreground collection, instead
1677     // expanding the heap if necessary.
1678     // Need the free list locks for the call to free() in compute_new_size()
1679     compute_new_size();
1680     return;
1681   }
1682   acquire_control_and_collect(full, clear_all_soft_refs);
1683   _full_gcs_since_conc_gc++;
1684 
1685 }
1686 
// Ask the CMS background thread to run a full collection, recording the
// triggering cause so the resulting GC event is attributed correctly
// (8013934).  The request is only posted if the caller's snapshot of the
// full-collection count (full_gc_count) still matches the heap's current
// count; otherwise a full collection has already begun since the snapshot
// was taken, and the request is treated as already satisfied.
1687 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1688   GenCollectedHeap* gch = GenCollectedHeap::heap();
1689   unsigned int gc_count = gch->total_full_collections();
1690   if (gc_count == full_gc_count) {
       // Publish the request and its cause together under CGC_lock so the
       // CMS thread observes a consistent pair, then wake it up.
1691     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1692     _full_gc_requested = true;
1693     _full_gc_cause = cause;
1694     CGC_lock->notify();   // nudge CMS thread
1695   } else {
       // The collection count only moves forward; a current count smaller
       // than the caller's snapshot would be causally impossible.
1696     assert(gc_count > full_gc_count, "Error: causal loop");
1697   }
1698 }
1699 
// Returns true when the GC cause currently recorded on the heap originated
// outside the collector itself: either an explicit user request (e.g.
// System.gc()) or a serviceability request, as classified by the two
// GCCause predicates used below.
1700 bool CMSCollector::is_external_interruption() {
1701   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1702   return GCCause::is_user_requested_gc(cause) ||
1703          GCCause::is_serviceability_requested_gc(cause);
1704 }
1705 
1706 void CMSCollector::report_concurrent_mode_interruption() {
1707   if (is_external_interruption()) {
1708     if (PrintGCDetails) {
1709       gclog_or_tty->print(" (concurrent mode interrupted)");
1710     }
1711   } else {
1712     if (PrintGCDetails) {
1713       gclog_or_tty->print(" (concurrent mode failure)");


2190 // separate methods.
2191 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2192   assert(Thread::current()->is_ConcurrentGC_thread(),
2193     "A CMS asynchronous collection is only allowed on a CMS thread.");
2194 
2195   GenCollectedHeap* gch = GenCollectedHeap::heap();
2196   {
2197     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2198     MutexLockerEx hl(Heap_lock, safepoint_check);
2199     FreelistLocker fll(this);
2200     MutexLockerEx x(CGC_lock, safepoint_check);
2201     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2202       // The foreground collector is active or we're
2203       // not using asynchronous collections.  Skip this
2204       // background collection.
2205       assert(!_foregroundGCShouldWait, "Should be clear");
2206       return;
2207     } else {
2208       assert(_collectorState == Idling, "Should be idling before start.");
2209       _collectorState = InitialMarking;
2210       GCCause::Cause cause =
2211         _full_gc_requested ? _full_gc_cause : GCCause::_cms_concurrent_mark;
2212       register_gc_start(cause);
2213       // Reset the expansion cause, now that we are about to begin
2214       // a new cycle.
2215       clear_expansion_cause();
2216     }
2217     // Decide if we want to enable class unloading as part of the
2218     // ensuing concurrent GC cycle.
2219     update_should_unload_classes();
2220     _full_gc_requested = false;           // acks all outstanding full gc requests
2221     _full_gc_cause = GCCause::_no_gc;
2222     // Signal that we are about to start a collection
2223     gch->increment_total_full_collections();  // ... starting a collection cycle
2224     _collection_count_start = gch->total_full_collections();
2225   }
2226 
2227   // Used for PrintGC
2228   size_t prev_used;
2229   if (PrintGC && Verbose) {
2230     prev_used = _cmsGen->used(); // XXXPERM
2231   }
2232 
2233   // The change of the collection state is normally done at this level;
2234   // the exceptions are phases that are executed while the world is
2235   // stopped.  For those phases the change of state is done while the
2236   // world is stopped.  For baton passing purposes this allows the
2237   // background collector to finish the phase and change state atomically.
2238   // The foreground collector cannot wait on a phase that is done
2239   // while the world is stopped because the foreground collector already
2240   // has the world stopped and would deadlock.
2241   while (_collectorState != Idling) {