/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "utilities/dtrace.hpp"

#ifndef USDT2
HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__end);

HS_DTRACE_PROBE_DECL(hs_private, cms__remark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__remark__end);
#endif /* !USDT2 */

//////////////////////////////////////////////////////////
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
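// VM_CMS_Operation is the common base of the two stop-the-world CMS
// pauses below. Its prologue/epilogue run on the CMS thread and bracket
// the safepoint with the (optional) pending list lock and the Heap_lock;
// lost_race() detects a foreground collection that has already made the
// pause unnecessary.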
void VM_CMS_Operation::acquire_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::acquirePLL);
}

void VM_CMS_Operation::release_and_notify_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

void VM_CMS_Operation::verify_before_gc() {
  if (VerifyBeforeGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::heap()->prepare_for_verify();
    Universe::verify();
  }
}

void VM_CMS_Operation::verify_after_gc() {
  if (VerifyAfterGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::verify();
  }
}

bool VM_CMS_Operation::lost_race() const {
  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
    // We lost a race to a foreground collection
    // -- there's nothing to do
    return true;
  }
  assert(CMSCollector::abstract_state() == legal_state(),
         "Inconsistent collector state?");
  return false;
}

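// Runs on the CMS thread before the safepoint is scheduled: take the
// pending list lock (if this operation needs it) and then the Heap_lock,
// in that order; give up early if a foreground collection has already
// done the work.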
bool VM_CMS_Operation::doit_prologue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  if (needs_pll()) {
    acquire_pending_list_lock();
  }
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  if (lost_race()) {
    assert(_prologue_succeeded == false, "Initialized in c'tor");
    Heap_lock->unlock();
    if (needs_pll()) {
      release_and_notify_pending_list_lock();
    }
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

void VM_CMS_Operation::doit_epilogue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  // Release the Heap_lock first.
  Heap_lock->unlock();
  if (needs_pll()) {
    release_and_notify_pending_list_lock();
  }
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Initial_Mark
//////////////////////////////////////////////////////////
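// Initial-mark pause: with mutators stopped, mark the objects directly
// reachable from the roots. The pause is bracketed by dtrace probes and
// timed with the CMS GC timer.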
void VM_CMS_Initial_Mark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__initmark__begin);
#else /* USDT2 */
  HS_PRIVATE_CMS_INITMARK_BEGIN();
#endif /* USDT2 */

  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->_gc_timer_cm->register_gc_pause_end();

#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__initmark__end);
#else /* USDT2 */
  HS_PRIVATE_CMS_INITMARK_END();
#endif /* USDT2 */
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Final_Remark_Operation
//////////////////////////////////////////////////////////
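// Final-remark pause: finish marking by rescanning the roots and anything
// modified since the initial mark, then record a heap summary for the GC
// tracer before the concurrent sweep begins.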
void VM_CMS_Final_Remark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__remark__begin);
#else /* USDT2 */
  HS_PRIVATE_CMS_REMARK_BEGIN();
#endif /* USDT2 */

  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->save_heap_summary();
  _collector->_gc_timer_cm->register_gc_pause_end();

#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__remark__end);
#else /* USDT2 */
  HS_PRIVATE_CMS_REMARK_END();
#endif /* USDT2 */
}

// VM operation to invoke a concurrent collection of a
// GenCollectedHeap heap.
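// This operation is typically reached when System.gc() is run with
// -XX:+ExplicitGCInvokesConcurrent, or on exit from a JNI critical
// section with -XX:+GCLockerInvokesConcurrent, as the assert below
// reflects.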
void VM_GenCollectFullConcurrent::doit() {
  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_count_before == gch->total_collections()) {
    // The "full" in the do_full_collection() call below "forces" a
    // collection; the second arg, 0, ensures that only the young gen
    // is collected. XXX In the future, we'll probably need to have
    // something in this interface to say do this only if we are sure
    // we will not bail out to a full collection in this attempt, but
    // that's for the future.
    assert(SafepointSynchronize::is_at_safepoint(),
      "We can only be executing this arm of the 'if' at a safepoint");
    GCCauseSetter gccs(gch, _gc_cause);
    gch->do_full_collection(gch->must_clear_all_soft_refs(),
                            0 /* collect only youngest gen */);
  } // Else no need for a foreground young gc
  assert((_gc_count_before < gch->total_collections()) ||
         (GC_locker::is_active() /* gc may have been skipped */
          && (_gc_count_before == gch->total_collections())),
         "total_collections() should be monotonically increasing");

  MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
  if (gch->total_full_collections() == _full_gc_count_before) {
    // Disable iCMS until the full collection is done, and
    // remember that we did so.
    CMSCollector::disable_icms();
    _disabled_icms = true;
    // In case the CMS thread was in icms_wait(), wake it up.
    CMSCollector::start_icms();
    // Nudge the CMS thread to start a concurrent collection.
    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
  } else {
    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
  }
}

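// Decide whether doit() above must run at a safepoint. If a collection
// has already happened since this request was made, the VM thread only
// needs to nudge the CMS thread, which it can do without stopping the
// world; otherwise a safepoint is (conservatively) still required.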
bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
  Thread* thr = Thread::current();
  assert(thr != NULL, "Unexpected tid");
  if (!thr->is_Java_thread()) {
    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (_gc_count_before != gch->total_collections()) {
      // No need to do a young gc, we'll just nudge the CMS thread
      // in the doit() method above, to be executed soon.
      assert(_gc_count_before < gch->total_collections(),
             "total_collections() should be monotonically increasing");
      return false;  // no need for foreground young gc
    }
  }
  return true;       // may still need foreground young gc
}

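// Runs on the requesting Java thread after the VM operation completes:
// release the locks taken in the prologue and, for a System.gc() under
// ExplicitGCInvokesConcurrent, block until a full collection that
// witnesses our request has completed.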
void VM_GenCollectFullConcurrent::doit_epilogue() {
  Thread* thr = Thread::current();
  assert(thr->is_Java_thread(), "just checking");
  JavaThread* jt = (JavaThread*)thr;
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();

  // It is fine to test whether the number of completed collections has
  // exceeded our request count without locking because
  // the completion count is monotonically increasing;
  // this will break for very long-running apps when the
  // count overflows and wraps around. XXX fix me !!!
  // e.g. at the rate of 1 full gc per ms, this could
  // overflow in about 1000 years.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_cause != GCCause::_gc_locker &&
      gch->total_full_collections_completed() <= _full_gc_count_before) {
    // maybe we should change the condition to test _gc_cause ==
    // GCCause::_java_lang_system_gc, instead of
    // _gc_cause != GCCause::_gc_locker
    assert(_gc_cause == GCCause::_java_lang_system_gc,
           "the only way to get here is if this was a System.gc()-induced GC");
    assert(ExplicitGCInvokesConcurrent, "Error");
    // Now, wait for a full gc cycle that witnesses our request to
    // complete, but do so in native mode, because we want to lock the
    // FullGCCount_lock, which may be needed by the VM thread or by the
    // CMS thread, so we do not want to be suspended while holding that
    // lock.
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Either a concurrent or a stop-world full gc is sufficient
    // witness to our request.
    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  // Re-enable iCMS if we disabled it earlier.
  if (_disabled_icms) {
    CMSCollector::enable_icms();
  }
}