
src/hotspot/share/gc/g1/vm_operations_g1.cpp

rev 48467 : 8137099: G1 needs to "upgrade" GC within the safepoint if it can't allocate during that safepoint to avoid OoME
Summary: During a minor GC, if memory allocation fails, start a full GC within the same VM operation in the same safepoint. This avoids a race in which the GC locker can prevent the full GC from occurring, and with it a premature OoME.
Reviewed-by:
Contributed-by: thomas.schatzl@oracle.com, axel.siebenborn@sap.com
rev 48469 : imported patch 8137099-sjohanns-messages
rev 48470 : [mq]: 8137099-erikd-review
rev 48471 : [mq]: 8137099-erikd-review2
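
The summary above describes the key idea: rather than returning from the safepoint and scheduling a separate full-GC VM operation (which the GC locker can race with), the failed allocation is retried with progressively stronger collections inside the same pause. Below is a minimal, self-contained sketch of that escalation; ToyHeap, GCKind and satisfy_allocation_in_safepoint are invented names for illustration, not HotSpot types.

// Minimal sketch (assumed names, not HotSpot code) of "upgrading" the GC
// inside a single safepoint: try progressively stronger collections until
// the failed allocation can be satisfied, without ever leaving the pause.
#include <cstddef>
#include <cstdio>

enum class GCKind { YoungPause, Full, FullClearingSoftRefs };

struct ToyHeap {
  size_t free_words;

  explicit ToyHeap(size_t initial_free) : free_words(initial_free) {}

  // Pretend that stronger collections reclaim more memory.
  void collect(GCKind kind) {
    switch (kind) {
      case GCKind::YoungPause:           free_words += 10;  break;
      case GCKind::Full:                 free_words += 100; break;
      case GCKind::FullClearingSoftRefs: free_words += 200; break;
    }
  }

  void* allocate(size_t word_size) {
    if (word_size <= free_words) {
      free_words -= word_size;
      return this;  // dummy non-null "object"
    }
    return nullptr;
  }
};

// Everything below conceptually runs inside one safepoint, so no mutator can
// enter a JNI critical section between the young pause and the full GC and
// thereby block the escalation (the race the patch is closing).
void* satisfy_allocation_in_safepoint(ToyHeap& heap, size_t word_size) {
  const GCKind ladder[] = { GCKind::YoungPause, GCKind::Full,
                            GCKind::FullClearingSoftRefs };
  for (GCKind kind : ladder) {
    heap.collect(kind);
    if (void* result = heap.allocate(word_size)) {
      return result;  // satisfied; no OutOfMemoryError needed
    }
  }
  return nullptr;  // genuinely out of memory; only now report OoME
}

int main() {
  ToyHeap heap(0);
  void* obj = satisfy_allocation_in_safepoint(heap, 150);
  std::printf("allocation %s\n", obj != nullptr ? "succeeded" : "failed");
  return 0;
}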

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -31,50 +31,37 @@
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "runtime/interfaceSupport.hpp"
 
-VM_G1CollectForAllocation::VM_G1CollectForAllocation(uint gc_count_before,
-                                                     size_t word_size)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
-                                   GCCause::_allocation_failure) {
-  guarantee(word_size != 0, "An allocation should always be requested with this operation.");
-}
-
-void VM_G1CollectForAllocation::doit() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCCauseSetter x(g1h, _gc_cause);
-
-  _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
-  assert(_result == NULL || _pause_succeeded,
-         "if we get back a result, the pause should have succeeded");
-}
-
 void VM_G1CollectFull::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   g1h->do_full_collection(false /* clear_all_soft_refs */);
 }
 
-VM_G1IncCollectionPause::VM_G1IncCollectionPause(uint           gc_count_before,
-                                                 size_t         word_size,
+VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t         word_size,
+                                                     uint           gc_count_before,
+                                                     GCCause::Cause gc_cause,
                                                  bool           should_initiate_conc_mark,
                                                  double         target_pause_time_ms,
-                                                 GCCause::Cause gc_cause)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
+                                                     AllocationContext_t allocation_context)
+  : VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
+    _pause_succeeded(false),
+    _allocation_context(allocation_context),
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
     _should_retry_gc(false),
     _old_marking_cycles_completed_before(0) {
   guarantee(target_pause_time_ms > 0.0,
             "target_pause_time_ms = %1.6lf should be positive",
             target_pause_time_ms);
   _gc_cause = gc_cause;
 }
 
-bool VM_G1IncCollectionPause::doit_prologue() {
-  bool res = VM_G1OperationWithAllocRequest::doit_prologue();
+bool VM_G1CollectForAllocation::doit_prologue() {
+  bool res = VM_CollectForAllocation::doit_prologue();
   if (!res) {
     if (_should_initiate_conc_mark) {
       // The prologue can fail for a couple of reasons. The first is that another GC
       // got scheduled and prevented the scheduling of the initial mark GC. The
       // second is that the GC locker may be active and the heap can't be expanded.

@@ -85,19 +72,19 @@
     }
   }
   return res;
 }
 
-void VM_G1IncCollectionPause::doit() {
+void VM_G1CollectForAllocation::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
       "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
     _result = g1h->attempt_allocation_at_safepoint(_word_size,
-                                                   allocation_context(),
+                                                   _allocation_context,
                                                    false /* expect_null_cur_alloc_region */);
     if (_result != NULL) {
       // If we can successfully allocate before we actually do the
       // pause then we will consider this pause successful.
       _pause_succeeded = true;

@@ -142,31 +129,38 @@
       }
       return;
     }
   }
 
-  _pause_succeeded =
-    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
-  if (_pause_succeeded && _word_size > 0) {
-    // An allocation had been requested.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
-                                                   allocation_context(),
-                                                   true /* expect_null_cur_alloc_region */);
+  // Try a partial collection of some kind.
+  _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+
+  if (_pause_succeeded) {
+    if (_word_size > 0) {
+      // An allocation had been requested. Do it, escalating to a stronger
+      // kind of GC if necessary.
+      _result = g1h->satisfy_failed_allocation(_word_size, _allocation_context, &_pause_succeeded);
+    } else if (!g1h->has_regions_left_for_allocation()) {
+      // There has been a request to perform a GC to free some space. We have no
+      // information on how much memory has been asked for. In case there are
+      // absolutely no regions left to allocate into, do a maximally compacting full GC.
+      log_info(gc, ergo)("Attempting maximally compacting collection");
+      _pause_succeeded = g1h->do_full_collection(false, /* explicit gc */
+                                                 true   /* clear_all_soft_refs */);
+    }
+    guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
   } else {
     assert(_result == NULL, "invariant");
-    if (!_pause_succeeded) {
-      // Another possible reason reason for the pause to not be successful
-      // is that, again, the GC locker is active (and has become active
-      // since the prologue was executed). In this case we should retry
-      // the pause after waiting for the GC locker to become inactive.
+    // The only reason for the pause to not be successful is that the GC locker is
+    // active (or has become active since the prologue was executed). In this case
+    // we should retry the pause after waiting for the GC locker to become inactive.
       _should_retry_gc = true;
     }
-  }
 }
 
-void VM_G1IncCollectionPause::doit_epilogue() {
-  VM_G1OperationWithAllocRequest::doit_epilogue();
+void VM_G1CollectForAllocation::doit_epilogue() {
+  VM_CollectForAllocation::doit_epilogue();
 
   // If the pause was initiated by a System.gc() and
   // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
   // that just started (or maybe one that was already in progress) to
   // finish.
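
For context, a sketch (under assumed names, not the actual G1 caller) of the retry path that the _should_retry_gc flag above feeds: when doit() skips the pause because the GC locker was active, the code that issued the VM operation is expected to wait for the locker to clear and submit a new pause.

// Minimal sketch (assumed names, not HotSpot code) of the caller-side retry:
// when the pause could not run because the GC locker was active, the
// operation reports should_retry_gc and the caller waits for the locker to
// clear before scheduling a new pause.
#include <cstdio>

bool gc_locker_is_active = false;   // stand-in for GCLocker::is_active()

struct ToyPauseOperation {
  bool pause_succeeded = false;
  bool should_retry_gc = false;

  void doit() {
    if (gc_locker_is_active) {
      // Mirrors the else-branch in the diff above: skip the pause and ask
      // the caller to retry once the critical sections have drained.
      should_retry_gc = true;
      return;
    }
    pause_succeeded = true;          // pretend the collection worked
  }
};

void stall_until_clear() {
  // Stand-in for GCLocker::stall_until_clear(); here the locker simply
  // "clears" immediately so the example terminates.
  gc_locker_is_active = false;
}

bool collect_with_retry() {
  for (;;) {
    ToyPauseOperation op;
    op.doit();                       // conceptually VMThread::execute(&op)
    if (op.pause_succeeded) {
      return true;
    }
    if (!op.should_retry_gc) {
      return false;
    }
    stall_until_clear();             // wait out the GC locker, then retry
  }
}

int main() {
  gc_locker_is_active = true;        // simulate a pending JNI critical section
  std::printf("pause %s\n", collect_with_retry() ? "succeeded" : "failed");
  return 0;
}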