/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "runtime/interfaceSupport.hpp"

VM_G1CollectForAllocation::VM_G1CollectForAllocation(uint gc_count_before,
                                                     size_t word_size)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
                                   GCCause::_allocation_failure) {
  guarantee(word_size != 0, "An allocation should always be requested with this operation.");
}

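// Satisfy an allocation request that just failed in a Java thread by doing a
// collection at a safepoint. A minimal sketch of how a caller might drive this
// operation (simplified; prologue_succeeded(), pause_succeeded() and result()
// are assumed to be the accessors exposed by the VM_GC_Operation /
// VM_G1OperationWithAllocRequest base classes):
//
//   VM_G1CollectForAllocation op(gc_count_before, word_size);
//   VMThread::execute(&op);
//   if (op.prologue_succeeded() && op.pause_succeeded()) {
//     HeapWord* result = op.result();
//     // use the result, or retry the allocation if it is NULL ...
//   }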
void VM_G1CollectForAllocation::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);

  _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
  assert(_result == NULL || _pause_succeeded,
         "if we get back a result, the pause should have succeeded");
}

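// Perform a full, stop-the-world collection. clear_all_soft_refs is passed as
// false here; the underlying do_full_collection() may still decide to clear
// soft references based on the collector policy.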
void VM_G1CollectFull::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);
  g1h->do_full_collection(false /* clear_all_soft_refs */);
}

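// An incremental (young or mixed) collection pause. If should_initiate_conc_mark
// is set, the pause is also asked to start a concurrent marking cycle, i.e. to
// be an initial mark pause.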
VM_G1IncCollectionPause::VM_G1IncCollectionPause(uint           gc_count_before,
                                                 size_t         word_size,
                                                 bool           should_initiate_conc_mark,
                                                 double         target_pause_time_ms,
                                                 GCCause::Cause gc_cause)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
    _should_initiate_conc_mark(should_initiate_conc_mark),
    _target_pause_time_ms(target_pause_time_ms),
    _should_retry_gc(false),
    _old_marking_cycles_completed_before(0) {
  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  _gc_cause = gc_cause;
}

bool VM_G1IncCollectionPause::doit_prologue() {
  bool res = VM_G1OperationWithAllocRequest::doit_prologue();
  if (!res) {
    if (_should_initiate_conc_mark) {
      // The prologue can fail for a couple of reasons. The first is that another GC
      // got scheduled and prevented the scheduling of the initial mark GC. The
      // second is that the GC locker may be active and the heap can't be expanded.
      // In both cases we want to retry the GC so that the initial mark pause is
      // actually scheduled. In the second case, however, we should stall until
      // the GC locker is no longer active and then retry the initial mark GC.
      _should_retry_gc = true;
    }
  }
  return res;
}

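// Sketch of how a requesting thread is expected to honor _should_retry_gc
// (simplified; the actual loop lives in G1CollectedHeap::collect() and the
// allocation paths, and may differ in detail):
//
//   bool retry_gc;
//   do {
//     VM_G1IncCollectionPause op(gc_count_before, 0 /* word_size */,
//                                true /* should_initiate_conc_mark */,
//                                g1_policy()->max_pause_time_ms(), cause);
//     VMThread::execute(&op);
//     retry_gc = op.should_retry_gc();
//     if (retry_gc && GC_locker::is_active_and_needs_gc()) {
//       // Stall until the GC locker clears before retrying the initial mark.
//       GC_locker::stall_until_clear();
//     }
//   } while (retry_gc);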
void VM_G1IncCollectionPause::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(!_should_initiate_conc_mark ||
         ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
          _gc_cause == GCCause::_g1_humongous_allocation ||
          _gc_cause == GCCause::_update_allocation_context_stats_inc),
         "only a GC locker, a System.gc(), a stats update or a humongous allocation induced GC should start a cycle");

  if (_word_size > 0) {
    // An allocation has been requested. So, try to do that first.
    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                                   false /* expect_null_cur_alloc_region */);
    if (_result != NULL) {
      // If we can successfully allocate before we actually do the
      // pause then we will consider this pause successful.
      _pause_succeeded = true;
      return;
    }
  }

  GCCauseSetter x(g1h, _gc_cause);
  if (_should_initiate_conc_mark) {
    // It's safer to read old_marking_cycles_completed() here, given
    // that no one else will be updating it concurrently. Since we'll
    // only need it if we're initiating a marking cycle, no point in
    // setting it earlier.
    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    // The above routine returns true if we were able to force the
    // next GC pause to be an initial mark; it returns false if a
    // marking cycle is already in progress.
    //
    // If a marking cycle is already in progress just return and skip the
    // pause below - if the reason for requesting this initial mark pause
    // was a System.gc() then the requesting thread should block in
    // doit_epilogue() until the marking cycle is complete.
    //
    // If this initial mark pause was requested as part of a humongous
    // allocation then we know that the marking cycle must just have
    // been started by another thread (possibly also allocating a humongous
    // object) as there was no active marking cycle when the requesting
    // thread checked before calling collect() in
    // attempt_allocation_humongous(). Retrying the GC, in this case,
    // will cause the requesting thread to spin inside collect() until the
    // just started marking cycle is complete - which may be a while. So
    // we do NOT retry the GC.
    if (!res) {
      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
      if (_gc_cause != GCCause::_g1_humongous_allocation) {
        _should_retry_gc = true;
      }
      return;
    }
  }

  _pause_succeeded =
    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
  if (_pause_succeeded && _word_size > 0) {
    // An allocation had been requested.
    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                                   true /* expect_null_cur_alloc_region */);
  } else {
    assert(_result == NULL, "invariant");
    if (!_pause_succeeded) {
      // Another possible reason for the pause to not be successful
      // is that, again, the GC locker is active (and has become active
      // since the prologue was executed). In this case we should retry
      // the pause after waiting for the GC locker to become inactive.
      _should_retry_gc = true;
    }
  }
}

void VM_G1IncCollectionPause::doit_epilogue() {
  VM_G1OperationWithAllocRequest::doit_epilogue();

  // If the pause was initiated by a System.gc() and
  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
  // that just started (or maybe one that was already in progress) to
  // finish.
  if (_gc_cause == GCCause::_java_lang_system_gc &&
      _should_initiate_conc_mark) {
    assert(ExplicitGCInvokesConcurrent,
           "the only way to be here is if ExplicitGCInvokesConcurrent is set");

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // In the doit() method we saved g1h->old_marking_cycles_completed()
    // in the _old_marking_cycles_completed_before field. We have to
    // wait until we observe that g1h->old_marking_cycles_completed()
    // has increased by at least one. This can happen if a) we started
    // a cycle and it completes, b) a cycle already in progress
    // completes, or c) a Full GC happens.

    // If the condition has already been reached, there's no point in
    // actually taking the lock and doing the wait.
    if (g1h->old_marking_cycles_completed() <=
                                          _old_marking_cycles_completed_before) {
      // The following is largely copied from CMS

      Thread* thr = Thread::current();
      assert(thr->is_Java_thread(), "invariant");
      JavaThread* jt = (JavaThread*)thr;
      ThreadToNativeFromVM native(jt);

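      // Wait to be woken up. The expectation is that waiters on FullGCCount_lock
      // are notified (notify_all) whenever old_marking_cycles_completed is
      // incremented, i.e. when a marking cycle or a Full GC finishes.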
      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      while (g1h->old_marking_cycles_completed() <=
                                          _old_marking_cycles_completed_before) {
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  }
}

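// The "PLL" is the java.lang.ref pending list lock, which has to be held across
// a GC that does reference processing. The VM thread cannot acquire a Java-level
// lock itself, so the surrogate locker thread (SLT) takes and releases it on
// behalf of these operations.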
void VM_CGC_Operation::acquire_pending_list_lock() {
  assert(_needs_pll, "don't call this otherwise");
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
  if (slt != NULL) {
    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
  } else {
    SurrogateLockerThread::report_missing_slt();
  }
}

void VM_CGC_Operation::release_and_notify_pending_list_lock() {
  assert(_needs_pll, "don't call this otherwise");
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

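// Execute the closure handed to this operation (typically the remark or cleanup
// work scheduled by the concurrent mark thread) at a safepoint, wrapped in the
// usual GC timing and logging.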
void VM_CGC_Operation::doit() {
  TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
  SharedHeap* sh = SharedHeap::heap();
  // This could go away if CollectedHeap gave access to _gc_is_active...
  if (sh != NULL) {
    IsGCActiveMark x;
    _cl->do_void();
  } else {
    _cl->do_void();
  }
}

bool VM_CGC_Operation::doit_prologue() {
  // Note the relative order of the locks must match that in
  // VM_GC_Operation::doit_prologue() or deadlocks can occur
  if (_needs_pll) {
    acquire_pending_list_lock();
  }

  Heap_lock->lock();
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
  return true;
}

void VM_CGC_Operation::doit_epilogue() {
  // Note the relative order of the unlocks must match that in
  // VM_GC_Operation::doit_epilogue()
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  if (_needs_pll) {
    release_and_notify_pending_list_lock();
  }
}