/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "runtime/interfaceSupport.inline.hpp"

void VM_G1CollectFull::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);
  g1h->do_full_collection(false /* clear_all_soft_refs */);
}

void VM_G1IdleCompaction::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);
  // Shrink or expand the heap to its ergonomic target size, as is done
  // after a full collection.
  g1h->resize_if_necessary_after_full_collection();
}

VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size,
                                                     uint gc_count_before,
                                                     GCCause::Cause gc_cause,
                                                     bool should_initiate_conc_mark,
                                                     double target_pause_time_ms)
  : VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
    _pause_succeeded(false),
    _should_initiate_conc_mark(should_initiate_conc_mark),
    _should_retry_gc(false),
    _target_pause_time_ms(target_pause_time_ms),
    _old_marking_cycles_completed_before(0) {
  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive",
            target_pause_time_ms);
  _gc_cause = gc_cause;
}

bool VM_G1CollectForAllocation::doit_prologue() {
  bool res = VM_CollectForAllocation::doit_prologue();
  if (!res) {
    if (_should_initiate_conc_mark) {
      // The prologue can fail for a couple of reasons. The first is that another GC
      // got scheduled and prevented the scheduling of the initial mark GC. The
      // second is that the GC locker is active and the heap can't be expanded.
      // In both cases we want to retry the GC so that the initial mark pause is
      // actually scheduled. In the second case, however, we should stall
      // until the GC locker is no longer active and then retry the initial mark GC.
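      // The retry itself is driven by the requesting thread: callers such as
      // G1CollectedHeap::collect() check should_retry_gc() after this operation
      // completes and, if needed, stall on the GC locker before re-submitting it.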
      _should_retry_gc = true;
    }
  }
  return res;
}

void VM_G1CollectForAllocation::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
         "only a GC locker, a System.gc(), stats update, whitebox, or a humongous allocation induced GC should start a cycle");

  if (_word_size > 0) {
    // An allocation has been requested. So, try to do that first.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                                   false /* expect_null_cur_alloc_region */);
    if (_result != NULL) {
      // If we can successfully allocate before we actually do the
      // pause then we will consider this pause successful.
      _pause_succeeded = true;
      return;
    }
  }

  GCCauseSetter x(g1h, _gc_cause);
  if (_should_initiate_conc_mark) {
    // It's safer to read old_marking_cycles_completed() here, given
    // that no one else will be updating it concurrently. Since we'll
    // only need it if we're initiating a marking cycle, no point in
    // setting it earlier.
    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    // The above routine returns true if we were able to force the
    // next GC pause to be an initial mark; it returns false if a
    // marking cycle is already in progress.
    //
    // If a marking cycle is already in progress just return and skip the
    // pause below - if the reason for requesting this initial mark pause
    // was due to a System.gc() then the requesting thread should block in
    // doit_epilogue() until the marking cycle is complete.
    //
    // If this initial mark pause was requested as part of a humongous
    // allocation then we know that the marking cycle must just have
    // been started by another thread (possibly also allocating a humongous
    // object) as there was no active marking cycle when the requesting
    // thread checked before calling collect() in
    // attempt_allocation_humongous(). Retrying the GC, in this case,
    // will cause the requesting thread to spin inside collect() until the
    // just started marking cycle is complete - which may be a while. So
    // we do NOT retry the GC.
    if (!res) {
      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
      if (_gc_cause != GCCause::_g1_humongous_allocation) {
        _should_retry_gc = true;
      }
      return;
    }
  }

  // Try a partial collection of some kind.
  _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);

  if (_pause_succeeded) {
    if (_word_size > 0) {
      // An allocation had been requested. Do it, eventually retrying with a
      // stronger kind of GC if it fails.
      _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
    } else {
      bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
                                    !g1h->has_regions_left_for_allocation();
      if (should_upgrade_to_full) {
        // There has been a request to perform a GC to free some space. We have no
        // information on how much memory has been asked for. In case there are
        // absolutely no regions left to allocate into, do a maximally compacting full GC.
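        // Such a collection also clears all soft references (note the
        // clear_all_soft_refs argument below) so that as much space as
        // possible is reclaimed before giving up.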
        log_info(gc, ergo)("Attempting maximally compacting collection");
        _pause_succeeded = g1h->do_full_collection(false, /* explicit gc */
                                                   true /* clear_all_soft_refs */);
      }
    }
    guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
  } else {
    assert(_result == NULL, "invariant");
    // The only reason for the pause to not be successful is that the GC locker is
    // active (or has become active since the prologue was executed). In this case
    // we should retry the pause after waiting for the GC locker to become inactive.
    _should_retry_gc = true;
  }
}

void VM_G1CollectForAllocation::doit_epilogue() {
  VM_CollectForAllocation::doit_epilogue();

  // If the pause was initiated by a System.gc() and
  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
  // that just started (or maybe one that was already in progress) to
  // finish.
  if (GCCause::is_user_requested_gc(_gc_cause) &&
      _should_initiate_conc_mark) {
    assert(ExplicitGCInvokesConcurrent,
           "the only way to be here is if ExplicitGCInvokesConcurrent is set");

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // In the doit() method we saved g1h->old_marking_cycles_completed()
    // in the _old_marking_cycles_completed_before field. We have to
    // wait until we observe that g1h->old_marking_cycles_completed()
    // has increased by at least one. This can happen if a) we started
    // a cycle and it completes, b) a cycle already in progress
    // completes, or c) a Full GC happens.

    // If the condition has already been reached, there's no point in
    // actually taking the lock and doing the wait.
    if (g1h->old_marking_cycles_completed() <=
        _old_marking_cycles_completed_before) {
      // The following is largely copied from CMS.

      Thread* thr = Thread::current();
      assert(thr->is_Java_thread(), "invariant");
      JavaThread* jt = (JavaThread*)thr;
      ThreadToNativeFromVM native(jt);

      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      while (g1h->old_marking_cycles_completed() <=
             _old_marking_cycles_completed_before) {
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  }
}

void VM_CGC_Operation::doit() {
  GCIdMark gc_id_mark(_gc_id);
  GCTraceCPUTime tcpu;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCTraceTime(Info, gc) t(_printGCMessage, g1h->concurrent_mark()->gc_timer_cm(), GCCause::_no_gc, true);
  TraceCollectorStats tcs(g1h->g1mm()->conc_collection_counters());
  SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
  IsGCActiveMark x;
  _cl->do_void();
}

bool VM_CGC_Operation::doit_prologue() {
  Heap_lock->lock();
  return true;
}

void VM_CGC_Operation::doit_epilogue() {
  if (Universe::has_reference_pending_list()) {
    Heap_lock->notify_all();
  }
  Heap_lock->unlock();
}