/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#endif // INCLUDE_ALL_GCS

#ifndef USDT2
HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
HS_DTRACE_PROBE_DECL(hotspot, gc__end);
#endif /* !USDT2 */

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here so that it lives in only one file.  We can't create
// new probes for the other file anymore; the dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
#ifndef USDT2
  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#else /* USDT2 */
  HOTSPOT_GC_BEGIN(
                   full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#endif /* USDT2 */
}

void VM_GC_Operation::notify_gc_end() {
#ifndef USDT2
  HS_DTRACE_PROBE(hotspot, gc__end);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#else /* USDT2 */
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
#endif /* USDT2 */
}

void VM_GC_Operation::acquire_pending_list_lock() {
  // We may enter this with a pending exception set.
  InstanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}


void VM_GC_Operation::release_and_notify_pending_list_lock() {

  InstanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}

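// Acquire the reference pending list lock and the Heap_lock (in that order)
// before the safepoint, and decide whether the operation should proceed.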
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  acquire_pending_list_lock();
  // If the GC count has changed, someone beat us to the collection.
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    SharedHeap* sh = SharedHeap::heap();
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}


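// Release the locks taken in doit_prologue, in the reverse order of acquisition.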
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

bool VM_GC_HeapInspection::doit_prologue() {
  if (Universe::heap()->supports_heap_inspection()) {
    return VM_GC_Operation::doit_prologue();
  } else {
    return false;
  }
}

bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}

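// Request a full collection from the VM thread; returns false if the GC locker
// is active and no collection was attempted.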
bool VM_GC_HeapInspection::collect() {
  if (GC_locker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

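// Perform the heap inspection, preceded by a full collection if one was
// requested via _full_gc.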
void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GC_locker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the GC locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt another collection.
      // The latter point is subtle: even a failed attempt to GC will induce
      // one in the future, which we want to avoid here because the GC is only
      // valuable to us if it happens now, not at some point in the eventual
      // future.
      warning("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}


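// Retry the allocation that failed, collecting the heap if necessary to
// satisfy it.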
void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _res = gch->satisfy_failed_allocation(_size, _tlab);
  assert(gch->is_in_reserved_or_null(_res), "result not in heap");

  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

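// Perform a full collection of the generations up to and including _max_level.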
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}

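// Retry the metadata allocation, escalating from a plain allocation attempt to
// metaspace expansion and, if needed, full collections (the last of which
// clears soft references).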
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  }

  if (_result == NULL) {
    if (UseConcMarkSweepGC) {
      if (CMSClassUnloadingEnabled) {
        MetaspaceGC::set_should_concurrent_collect(true);
      }
      // For CMS, expand the metaspace since the collection is going to be concurrent.
      _result =
        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // Don't clear the soft refs yet.
      if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
        gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
      }
      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
      // After a GC, try to allocate without expanding.  This could fail,
      // in which case expansion will be tried below.
      _result =
        _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // If still failing, allow the Metaspace to expand.
      // See delta_capacity_until_GC() for an explanation of the
      // amount of the expansion.
      // This should work unless there really is no more space
      // or a MaxMetaspaceSize has been specified on the command line.
      _result =
        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
      if (_result == NULL) {
        // If expansion failed, do a last-ditch collection and try allocating
        // again.  A last-ditch collection will clear softrefs.  This
        // behavior is similar to the last-ditch collection done for perm
        // gen when it was full and a collection for failed allocation
        // did not free perm gen space.
        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
        _result =
          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
      }
    }
    if (Verbose && PrintGCDetails && _result == NULL) {
      gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                             SIZE_FORMAT, _size);
    }
  }

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}