/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/metaspace/classLoaderMetaspace.hpp"
#include "memory/metaspace/metaspaceEnums.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#endif // INCLUDE_G1GC

VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->soft_ref_policy()->set_all_soft_refs_clear(false);
}

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here, so it's only in one file.  Can't create new probes
// for the other file anymore.  The dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(
                   full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GCLocker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GCLocker cannot be active when initiating GC");
  }
  return skip;
}

bool VM_GC_Operation::doit_prologue() {
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  // If the GC count has changed, someone beat us to the collection.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

void VM_GC_Operation::doit_epilogue() {
  // Clean up old interpreter OopMap entries that were replaced
  // during the GC thread root traversal.
  OopMapCache::cleanup_old_entries();
  if (Universe::has_reference_pending_list()) {
    Heap_lock->notify_all();
  }
  Heap_lock->unlock();
}
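
// Illustrative sketch (not part of this file): a VM_GC_Operation is normally
// dispatched through VMThread::execute(), which runs doit_prologue() on the
// requesting thread, doit() in the VM thread at a safepoint, and then
// doit_epilogue(). The caller samples the collection counters under the
// Heap_lock before building the operation; that handshake is what lets
// skip_operation() above detect that another thread's GC already ran. A
// simplified full-collection request might look like this (constructor
// argument order hedged):
//
//   uint gc_count_before, full_gc_count_before;
//   {
//     MutexLocker ml(Heap_lock);
//     gc_count_before      = Universe::heap()->total_collections();
//     full_gc_count_before = Universe::heap()->total_full_collections();
//   }
//   VM_GenCollectFull op(gc_count_before, full_gc_count_before,
//                        GCCause::_java_lang_system_gc,
//                        GenCollectedHeap::OldGen);
//   VMThread::execute(&op);  // prologue backs out if another GC ran first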
bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GCLocker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GCLocker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt to do a collection.
      // The latter is a subtle point, because even a failed attempt
      // to GC will, in fact, induce one in the future, which we
      // probably want to avoid in this case because the GC that we may
      // be about to attempt holds value for us only
      // if it happens now and not if it happens in the eventual
      // future.
      log_warning(gc)("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}

void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

  if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}
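
// Illustrative sketch (not part of this file): allocation slow paths
// typically loop around VM_GenCollectForAllocation and use gc_locked()
// (set via set_gc_locked() above) to detect that doit() could not collect
// because a JNI critical section held the GCLocker. Simplified, hedged
// example:
//
//   for (;;) {
//     uint gc_count_before;
//     {
//       MutexLocker ml(Heap_lock);
//       gc_count_before = Universe::heap()->total_collections();
//     }
//     VM_GenCollectForAllocation op(word_size, is_tlab, gc_count_before);
//     VMThread::execute(&op);
//     if (op.prologue_succeeded()) {
//       if (op.result() != NULL || !op.gc_locked()) {
//         return op.result();   // NULL means the GC ran but freed no space
//       }
//       GCLocker::stall_until_clear();  // wait for the GCLocker-induced GC
//     }
//     // prologue failed: another thread collected first -- just retry
//   }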
VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _result(NULL), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_CMSGC
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }
#endif

#if INCLUDE_G1GC
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->policy()->collector_state()->set_initiate_conc_mark_if_possible(true);

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}

void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1 expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC try to allocate without expanding.  Could fail
  // and expansion will be tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a collection clearing soft references.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_clear_soft_refs);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);

  if (GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _word_size(word_size), _result(NULL) {
  // Only report if operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}
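
// Illustrative sketch (not part of this file): the metaspace slow path that
// drives VM_CollectForMetadataAllocation::doit() above looks roughly like
// the following. This is a simplified, hedged reconstruction; the real
// caller also loops while the operation reports gc_locked():
//
//   MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
//   if (result == NULL) {
//     // Allocation failed; schedule a GC that can unload classes and retry.
//     uint gc_count, full_gc_count;
//     {
//       MutexLocker ml(Heap_lock);
//       gc_count      = Universe::heap()->total_collections();
//       full_gc_count = Universe::heap()->total_full_collections();
//     }
//     VM_CollectForMetadataAllocation op(loader_data, word_size, mdtype,
//                                        gc_count, full_gc_count,
//                                        GCCause::_metadata_GC_threshold);
//     VMThread::execute(&op);
//     result = op.result();  // still NULL => report a Metaspace OOM
//   }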