/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#endif // INCLUDE_ALL_GCS

VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->collector_policy()->set_all_soft_refs_clear(false);
}

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here so that it is only in one file. New probes can't be
// created for the other file anymore; the dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(
                   full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::acquire_pending_list_lock() {
  _pending_list_locker.lock();
}

void VM_GC_Operation::release_and_notify_pending_list_lock() {
  _pending_list_locker.unlock();
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GCLocker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GCLocker cannot be active when initiating GC");
  }
  return skip;
}

bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed, someone beat us to the collection.
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}


void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GCLocker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if a collection
                                               // does not happen (e.g. because the
                                               // GCLocker is held or _full_gc is false)
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt to do a collection.
      // The latter is a subtle point, because even a failed attempt
      // to GC will, in fact, induce one in the future, which we
      // probably want to avoid in this case because the GC that we may
      // be about to attempt holds value for us only
      // if it happens now and not if it happens in the eventual
      // future.
158 warning("GC locker is held; pre-dump GC was skipped"); 159 } 160 } 161 HeapInspection inspect(_csv_format, _print_help, _print_class_stats, 162 _columns); 163 inspect.heap_inspection(_out); 164 } 165 166 167 void VM_GenCollectForAllocation::doit() { 168 SvcGCMarker sgcm(SvcGCMarker::MINOR); 169 170 GenCollectedHeap* gch = GenCollectedHeap::heap(); 171 GCCauseSetter gccs(gch, _gc_cause); 172 _result = gch->satisfy_failed_allocation(_word_size, _tlab); 173 assert(gch->is_in_reserved_or_null(_result), "result not in heap"); 174 175 if (_result == NULL && GCLocker::is_active_and_needs_gc()) { 176 set_gc_locked(); 177 } 178 } 179 180 void VM_GenCollectFull::doit() { 181 SvcGCMarker sgcm(SvcGCMarker::FULL); 182 183 GenCollectedHeap* gch = GenCollectedHeap::heap(); 184 GCCauseSetter gccs(gch, _gc_cause); 185 gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation); 186 } 187 188 VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data, 189 size_t size, 190 Metaspace::MetadataType mdtype, 191 uint gc_count_before, 192 uint full_gc_count_before, 193 GCCause::Cause gc_cause) 194 : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true), 195 _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) { 196 assert(_size != 0, "An allocation should always be requested with this operation."); 197 AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek()); 198 } 199 200 // Returns true iff concurrent GCs unloads metadata. 201 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() { 202 #if INCLUDE_ALL_GCS 203 if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) { 204 MetaspaceGC::set_should_concurrent_collect(true); 205 return true; 206 } 207 208 if (UseG1GC && ClassUnloadingWithConcurrentMark) { 209 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 210 g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true); 211 212 GCCauseSetter x(g1h, _gc_cause); 213 214 // At this point we are supposed to start a concurrent cycle. We 215 // will do so if one is not already in progress. 216 bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause); 217 218 if (should_start) { 219 double pause_target = g1h->g1_policy()->max_pause_time_ms(); 220 g1h->do_collection_pause_at_safepoint(pause_target); 221 } 222 return true; 223 } 224 #endif 225 226 return false; 227 } 228 229 void VM_CollectForMetadataAllocation::doit() { 230 SvcGCMarker sgcm(SvcGCMarker::FULL); 231 232 CollectedHeap* heap = Universe::heap(); 233 GCCauseSetter gccs(heap, _gc_cause); 234 235 // Check again if the space is available. Another thread 236 // may have similarly failed a metadata allocation and induced 237 // a GC that freed space for the allocation. 238 if (!MetadataAllocationFailALot) { 239 _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); 240 if (_result != NULL) { 241 return; 242 } 243 } 244 245 if (initiate_concurrent_GC()) { 246 // For CMS and G1 expand since the collection is going to be concurrent. 247 _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype); 248 if (_result != NULL) { 249 return; 250 } 251 252 log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1"); 253 } 254 255 // Don't clear the soft refs yet. 256 heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold); 257 // After a GC try to allocate without expanding. Could fail 258 // and expansion will be tried below. 
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again.  A last-ditch collection will clear softrefs.  This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);

  if (GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
  // Only report if the operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}
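
// Illustrative sketch (not part of the original file): callers do not invoke
// doit() directly. A GC request is made on the allocating Java thread by
// constructing one of these operations and handing it to the VM thread, which
// runs doit_prologue(), then doit() at a safepoint, then doit_epilogue().
// The exact call sites live in the heap / collector policy code and may differ
// by release; roughly:
//
//   uint gc_count_before = Universe::heap()->total_collections();
//   VM_GenCollectForAllocation op(word_size, is_tlab, gc_count_before);
//   VMThread::execute(&op);
//   if (op.prologue_succeeded()) {
//     HeapWord* result = op.result();
//     // use result, or retry/expand if it is NULL
//   }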