/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "runtime/interfaceSupport.hpp"

bool VM_G1OperationWithAllocRequest::doit_prologue() {
  // Let the base class take the prologue locks; if that succeeds, record
  // that the Heap_lock is now held on behalf of a GC operation.
  bool succeeded = VM_CollectForAllocation::doit_prologue();
  if (succeeded) {
    G1CollectedHeap::heap()->set_heap_lock_held_for_gc(true);
  }
  return succeeded;
}

void VM_G1OperationWithAllocRequest::doit_epilogue() {
  // Clear the flag before the base class releases the Heap_lock.
  G1CollectedHeap::heap()->set_heap_lock_held_for_gc(false);
  VM_CollectForAllocation::doit_epilogue();
}

VM_G1CollectForAllocation::VM_G1CollectForAllocation(uint gc_count_before,
                                                     size_t word_size)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
                                   GCCause::_allocation_failure) {
  guarantee(word_size != 0, "An allocation should always be requested with this operation.");
}

void VM_G1CollectForAllocation::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);

  _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
  assert(_result == NULL || _pause_succeeded,
         "if we get back a result, the pause should have succeeded");
}

void VM_G1CollectFull::doit() {
  // Perform a stop-the-world full collection.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);
  g1h->do_full_collection(false /* clear_all_soft_refs */);
}

VM_G1IncCollectionPause::VM_G1IncCollectionPause(uint gc_count_before,
                                                 size_t word_size,
                                                 bool should_initiate_conc_mark,
                                                 double target_pause_time_ms,
                                                 GCCause::Cause gc_cause)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
    _should_initiate_conc_mark(should_initiate_conc_mark),
    _target_pause_time_ms(target_pause_time_ms),
    _should_retry_gc(false),
    _old_marking_cycles_completed_before(0) {
  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  _gc_cause = gc_cause;
}

bool
VM_G1IncCollectionPause::doit_prologue() {
  bool res = VM_G1OperationWithAllocRequest::doit_prologue();
  if (!res) {
    if (_should_initiate_conc_mark) {
      // The prologue can fail for a couple of reasons. The first is that another GC
      // got scheduled and prevented the scheduling of the initial mark GC. The
      // second is that the GC locker may be active and the heap can't be expanded.
      // In both cases we want to retry the GC so that the initial mark pause is
      // actually scheduled. In the second case, however, we should stall until
      // the GC locker is no longer active and then retry the initial mark GC.
      _should_retry_gc = true;
    }
  }
  return res;
}

void VM_G1IncCollectionPause::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
      "only a GC locker, a System.gc(), stats update, whitebox, or a humongous allocation induced GC should start a cycle");

  if (_word_size > 0) {
    // An allocation has been requested. So, try to do that first.
    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                                   false /* expect_null_cur_alloc_region */);
    if (_result != NULL) {
      // If we can successfully allocate before we actually do the
      // pause then we will consider this pause successful.
      _pause_succeeded = true;
      return;
    }
  }

  GCCauseSetter x(g1h, _gc_cause);
  if (_should_initiate_conc_mark) {
    // It's safer to read old_marking_cycles_completed() here, given
    // that no one else will be updating it concurrently. Since we'll
    // only need it if we're initiating a marking cycle, no point in
    // setting it earlier.
    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    // The above routine returns true if we were able to force the
    // next GC pause to be an initial mark; it returns false if a
    // marking cycle is already in progress.
    //
    // If a marking cycle is already in progress, just return and skip the
    // pause below - if the reason for requesting this initial mark pause
    // was due to a System.gc() then the requesting thread should block in
    // doit_epilogue() until the marking cycle is complete.
    //
    // If this initial mark pause was requested as part of a humongous
    // allocation then we know that the marking cycle must just have
    // been started by another thread (possibly also allocating a humongous
    // object) as there was no active marking cycle when the requesting
    // thread checked before calling collect() in
    // attempt_allocation_humongous(). Retrying the GC, in this case,
    // will cause the requesting thread to spin inside collect() until the
    // just started marking cycle is complete - which may be a while. So
    // we do NOT retry the GC.
    if (!res) {
      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
      if (_gc_cause != GCCause::_g1_humongous_allocation) {
        _should_retry_gc = true;
      }
      return;
    }
  }

  _pause_succeeded =
    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
  if (_pause_succeeded && _word_size > 0) {
    // An allocation had been requested.
    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                                   true /* expect_null_cur_alloc_region */);
  } else {
    assert(_result == NULL, "invariant");
    if (!_pause_succeeded) {
      // Another possible reason for the pause to not be successful
      // is that, again, the GC locker is active (and has become active
      // since the prologue was executed). In this case we should retry
      // the pause after waiting for the GC locker to become inactive.
      _should_retry_gc = true;
    }
  }
}

void VM_G1IncCollectionPause::doit_epilogue() {
  VM_G1OperationWithAllocRequest::doit_epilogue();

  // If the pause was initiated by a System.gc() and
  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
  // that just started (or maybe one that was already in progress) to
  // finish.
  if (_gc_cause == GCCause::_java_lang_system_gc &&
      _should_initiate_conc_mark) {
    assert(ExplicitGCInvokesConcurrent,
           "the only way to be here is if ExplicitGCInvokesConcurrent is set");

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // In the doit() method we saved g1h->old_marking_cycles_completed()
    // in the _old_marking_cycles_completed_before field. We have to
    // wait until we observe that g1h->old_marking_cycles_completed()
    // has increased by at least one. This can happen if a) we started
    // a cycle and it completes, b) a cycle already in progress
    // completes, or c) a Full GC happens.

    // If the condition has already been reached, there's no point in
    // actually taking the lock and doing the wait.
    if (g1h->old_marking_cycles_completed() <=
          _old_marking_cycles_completed_before) {
      // The following is largely copied from CMS

      Thread* thr = Thread::current();
      assert(thr->is_Java_thread(), "invariant");
      JavaThread* jt = (JavaThread*)thr;
      ThreadToNativeFromVM native(jt);

      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      while (g1h->old_marking_cycles_completed() <=
              _old_marking_cycles_completed_before) {
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  }
}

void VM_CGC_Operation::acquire_pending_list_lock() {
  assert(_needs_pll, "don't call this otherwise");
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
  if (slt != NULL) {
    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
  } else {
    SurrogateLockerThread::report_missing_slt();
  }
}

void VM_CGC_Operation::release_and_notify_pending_list_lock() {
  assert(_needs_pll, "don't call this otherwise");
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

void VM_CGC_Operation::doit() {
  TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());
  IsGCActiveMark x;
  _cl->do_void();
}

bool VM_CGC_Operation::doit_prologue() {
  // Note the relative order of the locks must match that in
  // VM_GC_Operation::doit_prologue() or deadlocks can occur
  if (_needs_pll) {
    acquire_pending_list_lock();
  }

  Heap_lock->lock();
  G1CollectedHeap::heap()->set_heap_lock_held_for_gc(true);
  return true;
}

void VM_CGC_Operation::doit_epilogue() {
  // Note the relative order of the unlocks must match that in
  // VM_GC_Operation::doit_epilogue()
  G1CollectedHeap::heap()->set_heap_lock_held_for_gc(false);
  Heap_lock->unlock();
  if (_needs_pll) {
    release_and_notify_pending_list_lock();
  }
}