/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#endif // INCLUDE_ALL_GCS

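// Reset the collector policy's "clear all soft refs" flag on the way out,
// so that a decision made for this operation does not carry over to later
// collections.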
VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->collector_policy()->set_all_soft_refs_clear(false);
}

// The same dtrace probe can't be inserted in two different files, so it
// has to be called from this one file; new probes can't be created for
// the other file any more, because the dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

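// Thin wrappers around the pending list locker: the java.lang.ref pending
// list lock is acquired in doit_prologue(), before the Heap_lock, and
// released again in doit_epilogue().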
void VM_GC_Operation::acquire_pending_list_lock() {
  _pending_list_locker.lock();
}

void VM_GC_Operation::release_and_notify_pending_list_lock() {
  _pending_list_locker.unlock();
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GCLocker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GCLocker cannot be active when initiating GC");
  }
  return skip;
}

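// Take the pending list lock and then the Heap_lock before the safepoint,
// and give up early if another thread has already performed the requested
// collection.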
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed, someone beat us to the collection.
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}


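// Release the locks taken in doit_prologue(), in reverse acquisition order.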
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

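// A heap inspection is still useful even if another collection has happened
// since it was requested, so it is never skipped.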
bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

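// Attempt a full collection on behalf of the inspection. Returns false,
// without collecting, if the GC locker is held.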
bool VM_GC_HeapInspection::collect() {
  if (GCLocker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GCLocker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt another collection.
      // The latter is a subtle point: even a failed attempt to GC will induce
      // one in the future, which we probably want to avoid here, because a GC
      // holds value for the dump only if it happens now, not at some point in
      // the eventual future.
      log_warning(gc)("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}


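// Try to satisfy the allocation that failed, collecting if necessary; if
// the GC locker blocked the collection, record that so the requesting
// thread can retry once the critical sections have been exited.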
void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

  if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

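// Perform a full collection of the generations up to and including
// _max_generation, clearing soft references if the policy requires it.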
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}

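// A metadata allocation failure is always caused by a real allocation
// request (asserted below), so the allocation-requiring-GC event is sent
// unconditionally.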
VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC && ClassUnloadingWithConcurrentMark) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}

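// Retry the failed metadata allocation with progressively heavier measures:
// allocate again, kick off a concurrent cycle (CMS/G1), run a full GC,
// expand the metaspace, and finally run a full GC that also clears soft
// references.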
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1 expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC try to allocate without expanding.  This could fail;
  // in that case expansion is tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for an explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a collection clearing soft references.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_clear_soft_refs);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);

  if (GCLocker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

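// Base constructor for allocation-triggered collections. A word size of
// zero means the operation was not caused by an actual allocation request.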
VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
  // Only report if operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}