/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#endif // INCLUDE_ALL_GCS

VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->collector_policy()->set_all_soft_refs_clear(false);
}

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here, so that it lives in only one file. New probes
// cannot be created for the other file; the dtrace probes have to
// remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(
                   full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::acquire_pending_list_lock() {
  // We may enter this with a pending exception set.
  InstanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}

void VM_GC_Operation::release_and_notify_pending_list_lock() {
  InstanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}
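// Typical life cycle of a VM_GC_Operation (a sketch, simplified from
// callers such as GenCollectedHeap::collect; the locals below are
// illustrative, not the exact caller code): the requesting Java thread
// snapshots the collection counts, builds the operation, and hands it
// to the VM thread, which runs doit() at a safepoint:
//
//   uint gc_count_before      = Universe::heap()->total_collections();
//   uint full_gc_count_before = Universe::heap()->total_full_collections();
//   VM_GenCollectFull op(gc_count_before, full_gc_count_before,
//                        GCCause::_java_lang_system_gc, max_generation);
//   VMThread::execute(&op);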
// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}

bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed, someone beat us to the collection.
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GC_locker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}
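// VM_GC_HeapInspection backs the class-histogram tooling ("jmap -histo"
// and the "GC.class_histogram" jcmd).  When the caller asks for live
// objects only, _full_gc is set so that a collection is attempted
// before the histogram is taken.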
void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GC_locker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt a collection.
      // The latter point is subtle: even a failed GC attempt will, in fact,
      // induce a GC in the future, which we probably want to avoid here
      // because the collection holds value for us only if it happens now,
      // before the dump, and not at some point in the future.
      warning("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}

void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}

// Returns true iff a concurrent GC unloads metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}
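// On a failed metadata allocation, the operation below walks an
// escalating retry ladder (summarized here; see doit() for details):
//   1. retry the plain allocation -- another thread's GC may already
//      have freed enough metaspace,
//   2. for CMS/G1, request a concurrent cycle and allocate with expansion,
//   3. run a synchronous GC that does not clear soft references,
//   4. retry without expanding, then retry with expansion,
//   5. as a last resort, run a collection that clears soft references
//      and retry one final time.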
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1 expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC try to allocate without expanding.  Could fail
  // and expansion will be tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again.  A last-ditch collection will clear softrefs.  This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);

  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
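// Typical use (a sketch, simplified from the metaspace allocation slow
// path; the locals below are illustrative, not the exact caller code):
//
//   uint gc_count      = Universe::heap()->total_collections();
//   uint full_gc_count = Universe::heap()->total_full_collections();
//   VM_CollectForMetadataAllocation op(loader_data, word_size, mdtype,
//                                      gc_count, full_gc_count,
//                                      GCCause::_metadata_GC_threshold);
//   VMThread::execute(&op);
//   if (op.gc_locked()) {
//     // GC_locker was held: stall until it is released, then retry.
//   } else {
//     MetaWord* result = op.result();  // NULL if every retry step failed
//   }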