/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"


#ifndef USDT2
HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__end);

HS_DTRACE_PROBE_DECL(hs_private, cms__remark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__remark__end);
#endif /* !USDT2 */

//////////////////////////////////////////////////////////
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
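// The pending list lock (PLL) guards the java.lang.ref.Reference pending
// list and is a Java-level lock, so the CMS thread cannot take it directly;
// instead it asks the SurrogateLockerThread (SLT), a JavaThread, to acquire
// or release the lock on its behalf.  The two helpers below forward those
// requests to the SLT.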
void VM_CMS_Operation::acquire_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::acquirePLL);
}

void VM_CMS_Operation::release_and_notify_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

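// Heap verification before/after the safepoint operation, controlled by
// VerifyBeforeGC/VerifyAfterGC and VerifyGCStartAt.  Verification is done
// while holding the CMS free list lock and the bit map lock so that the
// collector's data structures are stable during the heap walk.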
void VM_CMS_Operation::verify_before_gc() {
  if (VerifyBeforeGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::heap()->prepare_for_verify();
    Universe::verify(true);
  }
}

void VM_CMS_Operation::verify_after_gc() {
  if (VerifyAfterGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::verify(true);
  }
}

bool VM_CMS_Operation::lost_race() const {
  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
    // We lost a race to a foreground collection
    // -- there's nothing to do
    return true;
  }
  assert(CMSCollector::abstract_state() == legal_state(),
         "Inconsistent collector state?");
  return false;
}

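// The prologue runs in the requesting (CMS) thread before the operation is
// scheduled: it acquires the pending list lock (when the concrete operation
// needs it) and then the Heap_lock, in that order.  If a foreground
// collection has raced ahead and left the collector idling, the locks are
// released again and false is returned, so doit() is never executed.  The
// epilogue releases the locks in the reverse order.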
bool VM_CMS_Operation::doit_prologue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  if (needs_pll()) {
    acquire_pending_list_lock();
  }
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  if (lost_race()) {
    assert(_prologue_succeeded == false, "Initialized in c'tor");
    Heap_lock->unlock();
    if (needs_pll()) {
      release_and_notify_pending_list_lock();
    }
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

void VM_CMS_Operation::doit_epilogue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  // Release the Heap_lock first.
  Heap_lock->unlock();
  if (needs_pll()) {
    release_and_notify_pending_list_lock();
  }
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Initial_Mark
//////////////////////////////////////////////////////////
void VM_CMS_Initial_Mark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__initmark__begin);
#else /* USDT2 */
  HS_PRIVATE_CMS_INITMARK_BEGIN(
                                );
#endif /* USDT2 */

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial);

  VM_CMS_Operation::verify_after_gc();
#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__initmark__end);
#else /* USDT2 */
  HS_PRIVATE_CMS_INITMARK_END(
                                );
#endif /* USDT2 */
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Final_Remark
//////////////////////////////////////////////////////////
void VM_CMS_Final_Remark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__remark__begin);
#else /* USDT2 */
  HS_PRIVATE_CMS_REMARK_BEGIN(
                                );
#endif /* USDT2 */

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal);

  VM_CMS_Operation::verify_after_gc();
#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__remark__end);
#else /* USDT2 */
  HS_PRIVATE_CMS_REMARK_END(
                                );
#endif /* USDT2 */
}

// VM operation to invoke a concurrent collection of a
// GenCollectedHeap heap.
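//
// Typically the requesting Java thread submits this operation via
// VMThread::execute() (e.g. from GenCollectedHeap::collect() when
// ExplicitGCInvokesConcurrent or GCLockerInvokesConcurrent is in effect).
// doit() runs in the VM thread at a safepoint: it performs a foreground
// young collection if no collection has intervened since the request was
// made, and then nudges the CMS thread to start a concurrent cycle; the
// requesting thread later waits in doit_epilogue() for that cycle to
// complete.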
void VM_GenCollectFullConcurrent::doit() {
  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_count_before == gch->total_collections()) {
    // The do_full_collection() call below "forces" a collection;
    // the second arg, 0, ensures that only the young gen is collected.
    // XXX In the future, we'll probably need to have something in this
    // interface to say: do this only if we are sure we will not bail
    // out to a full collection in this attempt, but that's
    // for the future.
    assert(SafepointSynchronize::is_at_safepoint(),
      "We can only be executing this arm of the 'if' at a safepoint");
    GCCauseSetter gccs(gch, _gc_cause);
    gch->do_full_collection(gch->must_clear_all_soft_refs(),
                            0 /* collect only youngest gen */);
  } // Else no need for a foreground young gc
  assert((_gc_count_before < gch->total_collections()) ||
         (GC_locker::is_active() /* gc may have been skipped */
          && (_gc_count_before == gch->total_collections())),
         "total_collections() should be monotonically increasing");

  MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
  if (gch->total_full_collections() == _full_gc_count_before) {
    // Disable iCMS until the full collection is done, and
    // remember that we did so.
    CMSCollector::disable_icms();
    _disabled_icms = true;
    // In case CMS thread was in icms_wait(), wake it up.
    CMSCollector::start_icms();
    // Nudge the CMS thread to start a concurrent collection.
    CMSCollector::request_full_gc(_full_gc_count_before);
  } else {
    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
  }
}

bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
  Thread* thr = Thread::current();
  assert(thr != NULL, "Unexpected tid");
  if (!thr->is_Java_thread()) {
    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (_gc_count_before != gch->total_collections()) {
      // No need to do a young gc, we'll just nudge the CMS thread
      // in the doit() method above, to be executed soon.
      assert(_gc_count_before < gch->total_collections(),
             "total_collections() should be monotonically increasing");
      return false;  // no need for foreground young gc
    }
  }
  return true;       // may still need foreground young gc
}


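// Runs in the requesting Java thread once the safepoint operation has
// completed.  For a System.gc()-induced request (with
// ExplicitGCInvokesConcurrent), the thread blocks here until the full gc
// cycle it is witnessing has completed, so System.gc() retains its usual
// "a collection has completed" semantics.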
void VM_GenCollectFullConcurrent::doit_epilogue() {
  Thread* thr = Thread::current();
  assert(thr->is_Java_thread(), "just checking");
  JavaThread* jt = (JavaThread*)thr;
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();

  // It is fine to test whether the count of completed collections has
  // exceeded our request count without locking because
  // the completion count is monotonically increasing;
  // this will break for very long-running apps when the
  // count overflows and wraps around. XXX fix me !!!
  // e.g. at the rate of 1 full gc per ms, this could
  // overflow in about 1000 years.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_cause != GCCause::_gc_locker &&
      gch->total_full_collections_completed() <= _full_gc_count_before) {
    // maybe we should change the condition to test _gc_cause ==
    // GCCause::_java_lang_system_gc, instead of
    // _gc_cause != GCCause::_gc_locker
    assert(_gc_cause == GCCause::_java_lang_system_gc,
           "the only way to get here is if this was a System.gc()-induced GC");
    assert(ExplicitGCInvokesConcurrent, "Error");
    // Now, wait for the witnessing concurrent gc cycle to complete,
    // but do so in native mode, because we want to lock the
    // FullGCCount_lock, which may be needed by the VM thread
    // or by the CMS thread, so we do not want to be suspended
    // while holding that lock.
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Either a concurrent or a stop-world full gc is sufficient
    // witness to our request.
    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  // Re-enable iCMS if we disabled it earlier.
  if (_disabled_icms) {
    CMSCollector::enable_icms();
  }
}