rev 8024: imported patch event1
* * *
imported patch event2

/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#endif // INCLUDE_ALL_GCS
VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->collector_policy()->set_all_soft_refs_clear(false);
}
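
// Note: resetting all_soft_refs_clear in the destructor (rather than in
// doit_epilogue()) presumably ensures the policy flag is reset even for
// operations whose prologue failed or that were skipped; this rationale is
// a reading of the code above, not something the file states explicitly.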

// The same dtrace probe cannot be inserted in two different files, so we
// have to call it here, so that it lives in only one file.  New probes can
// no longer be created for the other file; the dtrace probes have to
// remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(
                   full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}
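
// For reference, these USDT probes can be observed from outside the VM with
// a dtrace one-liner (illustrative sketch only; the probe names and the
// single "full" argument of gc-begin are assumed from the hotspot provider):
//
//   dtrace -n 'hotspot$target:::gc-begin { printf("GC begin, full=%d", arg0); }
//              hotspot$target:::gc-end   { printf("GC end"); }' -p <jvm-pid>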

void VM_GC_Operation::acquire_pending_list_lock() {
  // We may enter this with a pending exception already set.
  InstanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}

void VM_GC_Operation::release_and_notify_pending_list_lock() {
  InstanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple GC requests.  We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}
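
// Worked example: if two threads fail an allocation while
// total_collections() == 42, both enqueue an operation with
// _gc_count_before == 42.  The first operation runs and bumps the
// collection count to 43; when the second reaches skip_operation(),
// 42 != 43, so it is skipped rather than triggering a redundant GC.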

bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed, someone beat us to the collection.
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    SharedHeap* sh = SharedHeap::heap();
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}
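
// Note the lock ordering above: the pending list lock is acquired before
// Heap_lock, and doit_epilogue() below releases Heap_lock first, so the two
// locks are released in the reverse order of acquisition.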

void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

bool VM_GC_HeapInspection::doit_prologue() {
  if (Universe::heap()->supports_heap_inspection()) {
    return VM_GC_Operation::doit_prologue();
  } else {
    return false;
  }
}

bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GC_locker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GC_locker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the GC locker is held.
      // The following dump may then be misleading to someone expecting only
      // live objects to show up (see CR 6944195), so just issue a suitable
      // warning and do not attempt a collection.  The latter point is subtle:
      // even a failed GC attempt induces a GC in the future, which we want
      // to avoid here because the GC holds value for us only if it happens
      // now, not at some eventual future time.
      warning("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}
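
// A note on usage: this operation backs heap-histogram requests from
// serviceability tools (for example "jmap -histo"; with _full_gc set, the
// ":live" variant, which forces a collection first).  The exact tool
// mapping is an assumption here, not something this file establishes.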

void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
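
// If the allocation still failed while a GC locker was active,
// set_gc_locked() records that the collection was blocked by the GC locker;
// the caller is then expected to stall and retry once the locker-induced GC
// has had a chance to run.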

void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}

void VM_CollectForMetadataAllocation::doit_epilogue() {
  AllocTracer::send_collect_for_allocation_event(_size * HeapWordSize, _gcid, _gc_attempt);
  VM_GC_Operation::doit_epilogue();
}

VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause,
                                                                 uint gc_attempt)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL),
      _gc_attempt(gc_attempt), _gcid(GCId::peek()) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, _gcid, _gc_attempt);
}
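
// Event pairing: the constructor above reports the allocation that forced
// the GC (send_allocation_requiring_gc_event), and doit_epilogue() reports
// the completed collect-for-allocation (send_collect_for_allocation_event),
// so the two events bracket the whole VM operation under a single _gcid.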

// Returns true if a concurrent GC that unloads metadata has been initiated
// or requested.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->set_initiate_conc_mark_if_possible();

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}
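
// For reference, the two branches above correspond to running with either
// -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled or
// -XX:+UseG1GC -XX:+ClassUnloadingWithConcurrentMark; with any other
// collector (or with class unloading disabled) the caller falls back to a
// stop-the-world full collection.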

static void log_metaspace_alloc_failure_for_concurrent_GC() {
  if (Verbose && PrintGCDetails) {
    if (UseConcMarkSweepGC) {
      gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
    } else if (UseG1GC) {
      gclog_or_tty->print_cr("\nG1 full GC for Metaspace");
    }
  }
}
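
// The allocation retry ladder in doit() below:
//   1. retry the allocation as-is (another thread's GC may have freed space);
//   2. if a concurrent collector will unload classes, expand the metaspace
//      and allocate, letting the concurrent cycle reclaim metadata later;
//   3. full GC without clearing soft references, then allocate;
//   4. expand the metaspace and allocate;
//   5. last-ditch full GC that clears soft references, then allocate.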

void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again whether the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1, expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_metaspace_alloc_failure_for_concurrent_GC();
  }

  // Don't clear the soft refs yet.
  heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
  // After a GC, try to allocate without expanding.  This may fail,
  // in which case expansion is tried below.
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If still failing, allow the Metaspace to expand.
  // See delta_capacity_until_GC() for an explanation of the
  // amount of the expansion.
  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again.  A last-ditch collection will clear soft references.  This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for a failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                           SIZE_FORMAT, _size);
  }

  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size,
                                                 uint gc_count_before,
                                                 GCCause::Cause cause,
                                                 uint gc_attempt)
    : VM_GC_Operation(gc_count_before, cause), _result(NULL),
      _word_size(word_size), _gc_attempt(gc_attempt), _gcid(GCId::peek()) {
  // Only report if the operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, _gcid, _gc_attempt);
  }
}

void VM_CollectForAllocation::doit_epilogue() {
  // Only report if the operation was caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_collect_for_allocation_event(_word_size * HeapWordSize, _gcid, _gc_attempt);
  }
  VM_GC_Operation::doit_epilogue();
}