/*
 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "utilities/dtrace.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//////////////////////////////////////////////////////////
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
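// The pending list lock (PLL) is a Java-level lock guarding the
// java.lang.ref pending-reference list. Neither the VM thread nor the
// CMS thread may acquire a Java monitor directly, so the two helpers
// below delegate the acquire/release to the SurrogateLockerThread (SLT),
// a Java thread that manipulates the lock on their behalf.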
void VM_CMS_Operation::acquire_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::acquirePLL);
}

void VM_CMS_Operation::release_and_notify_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

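// Heap verification around a CMS pause. Both methods are no-ops unless the
// corresponding Verify{Before,After}GC flag is set and enough collections
// have occurred (VerifyGCStartAt). They take the CMS freelist lock and the
// bit map lock so the generation is parsable and the mark bit map is stable
// while Universe::verify() walks the heap.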
void VM_CMS_Operation::verify_before_gc() {
  if (VerifyBeforeGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::heap()->prepare_for_verify();
    Universe::verify();
  }
}

void VM_CMS_Operation::verify_after_gc() {
  if (VerifyAfterGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::verify();
  }
}

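// A foreground (stop-the-world) collection may complete the CMS cycle while
// this operation is waiting to be scheduled. If the collector has gone back
// to Idling, the scheduled pause is stale and there is nothing left to do.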
bool VM_CMS_Operation::lost_race() const {
  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
    // We lost a race to a foreground collection
    // -- there's nothing to do
    return true;
  }
  assert(CMSCollector::abstract_state() == legal_state(),
         "Inconsistent collector state?");
  return false;
}

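// The prologue is executed by the CMS thread before the operation is handed
// to the VM thread. It acquires the pending list lock (when needed) and then
// the Heap_lock, in that order; if a foreground collection has already won
// the race, both locks are dropped again and false is returned so the
// operation is never evaluated.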
bool VM_CMS_Operation::doit_prologue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  if (needs_pll()) {
    acquire_pending_list_lock();
  }
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  if (lost_race()) {
    assert(_prologue_succeeded == false, "Initialized in c'tor");
    Heap_lock->unlock();
    if (needs_pll()) {
      release_and_notify_pending_list_lock();
    }
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

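// The epilogue releases the locks taken in the prologue in the reverse
// order of acquisition: the Heap_lock first, then the pending list lock.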
void VM_CMS_Operation::doit_epilogue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  // Release the Heap_lock first.
  Heap_lock->unlock();
  if (needs_pll()) {
    release_and_notify_pending_list_lock();
  }
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Initial_Mark
//////////////////////////////////////////////////////////
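// The initial mark is the first of the two stop-the-world pauses in a CMS
// cycle: it marks the objects directly reachable from the roots so that
// concurrent marking can proceed from a consistent starting point.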
void VM_CMS_Initial_Mark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
  HS_PRIVATE_CMS_INITMARK_BEGIN();

  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->_gc_timer_cm->register_gc_pause_end();

  HS_PRIVATE_CMS_INITMARK_END();
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Final_Remark_Operation
//////////////////////////////////////////////////////////
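// The final remark is the second stop-the-world pause: it rescans objects
// that were modified during concurrent marking and completes reference
// processing, so that the subsequent concurrent sweep starts from a
// complete marking.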
void VM_CMS_Final_Remark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
  HS_PRIVATE_CMS_REMARK_BEGIN();

  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->save_heap_summary();
  _collector->_gc_timer_cm->register_gc_pause_end();

  HS_PRIVATE_CMS_REMARK_END();
}

// VM operation to invoke a concurrent collection of a
// GenCollectedHeap heap.
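// This operation backs System.gc() under -XX:+ExplicitGCInvokesConcurrent
// (and GC-locker-induced collections under GCLockerInvokesConcurrent): at the
// safepoint it performs at most a young collection, then asks the CMS thread
// to run the full collection concurrently rather than stopping the world for it.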
void VM_GenCollectFullConcurrent::doit() {
  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_count_before == gch->total_collections()) {
    // The "full" in the name of the do_full_collection() call below "forces"
    // a collection; the second arg, 0, below ensures that
    // only the young gen is collected. XXX In the future,
    // we'll probably need to have something in this interface
    // to say do this only if we are sure we will not bail
    // out to a full collection in this attempt, but that's
    // for the future.
    assert(SafepointSynchronize::is_at_safepoint(),
      "We can only be executing this arm of the 'if' at a safepoint");
    GCCauseSetter gccs(gch, _gc_cause);
    gch->do_full_collection(gch->must_clear_all_soft_refs(),
                            0 /* collect only youngest gen */);
  } // Else no need for a foreground young gc
  assert((_gc_count_before < gch->total_collections()) ||
         (GC_locker::is_active() /* gc may have been skipped */
          && (_gc_count_before == gch->total_collections())),
         "total_collections() should be monotonically increasing");

  MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
  if (gch->total_full_collections() == _full_gc_count_before) {
    // Nudge the CMS thread to start a concurrent collection.
    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
  } else {
    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
  }
}

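// Returns whether the operation still needs to be evaluated at a safepoint.
// If some collection has already happened since the request was made, the
// foreground young gc is unnecessary; doit() will then merely nudge the CMS
// thread, which does not require a safepoint.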
bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
  Thread* thr = Thread::current();
  assert(thr != NULL, "Unexpected tid");
  if (!thr->is_Java_thread()) {
    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (_gc_count_before != gch->total_collections()) {
      // No need to do a young gc, we'll just nudge the CMS thread
      // in the doit() method above, to be executed soon.
      assert(_gc_count_before < gch->total_collections(),
             "total_collections() should be monotonically increasing");
      return false;  // no need for foreground young gc
    }
  }
  return true;       // may still need foreground young gc
}

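// The epilogue runs in the requesting Java thread. After dropping the locks,
// a System.gc()-induced request (with ExplicitGCInvokesConcurrent) blocks,
// in native state, until some full collection has completed since the
// request, preserving the expectation that System.gc() does not return
// before a full gc has happened.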
void VM_GenCollectFullConcurrent::doit_epilogue() {
  Thread* thr = Thread::current();
  assert(thr->is_Java_thread(), "just checking");
  JavaThread* jt = (JavaThread*)thr;
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();

  // It is fine to test whether the count of completed collections has
  // exceeded our request count without locking because
  // the completion count is monotonically increasing;
  // this will break for very long-running apps when the
  // count overflows and wraps around. XXX fix me !!!
  // e.g. at the rate of 1 full gc per ms, this could
  // overflow in about 1000 years.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_cause != GCCause::_gc_locker &&
      gch->total_full_collections_completed() <= _full_gc_count_before) {
    // maybe we should change the condition to test _gc_cause ==
    // GCCause::_java_lang_system_gc, instead of
    // _gc_cause != GCCause::_gc_locker
    assert(_gc_cause == GCCause::_java_lang_system_gc,
           "the only way to get here is if this was a System.gc()-induced GC");
    assert(ExplicitGCInvokesConcurrent, "Error");
    // Now, wait for the witnessing concurrent gc cycle to complete,
    // but do so in native mode, because we want to lock the
    // FullGCCount_lock, which may be needed by the VM thread
    // or by the CMS thread, so we do not want to be suspended
    // while holding that lock.
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Either a concurrent or a stop-world full gc is sufficient
    // witness to our request.
    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}