/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#endif // INCLUDE_G1GC

VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->soft_ref_policy()->set_all_soft_refs_clear(false);
}

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here, so it's only in one file. Can't create new probes
// for the other file anymore. The dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(
                   full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GCLocker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GCLocker cannot be active when initiating GC");
  }
  return skip;
}

bool VM_GC_Operation::doit_prologue() {
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  // If the GC count has changed, someone beat us to the collection.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}


void VM_GC_Operation::doit_epilogue() {
  // Clean up old interpreter OopMap entries that were replaced
  // during the GC thread root traversal.
  OopMapCache::cleanup_old_entries();
  if (Universe::has_reference_pending_list()) {
    Heap_lock->notify_all();
  }
  Heap_lock->unlock();
}

bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GCLocker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GCLocker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt to do a collection.
      // The latter is a subtle point, because even a failed attempt
      // to GC will, in fact, induce one in the future, which we
      // probably want to avoid in this case because the GC that we may
      // be about to attempt holds value for us only
      // if it happens now and not if it happens in the eventual
      // future.
      log_warning(gc)("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect;
  inspect.heap_inspection(_out);
}


void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(_result == NULL || gch->is_in_reserved(_result), "result not in heap");

  if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}

VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _result(NULL), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_G1GC
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->policy()->collector_state()->set_initiate_conc_mark_if_possible(true);

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}

void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available. Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For G1, expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_debug(gc)("G1 full GC for Metaspace");
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC, try to allocate without expanding. Could fail
  // and expansion will be tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a collection clearing soft references.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_clear_soft_refs);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);

  if (GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _word_size(word_size), _result(NULL) {
  // Only report if operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}