src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 53920 : imported patch 8218880-g1-crashes-periodic-gc-gclocker
rev 53921 : [mq]: 8218880-shade-review

@@ -2114,18 +2114,18 @@
 
 void G1CollectedHeap::collect(GCCause::Cause cause) {
   attempt_collect(cause, true);
 }
 
-bool G1CollectedHeap::attempt_collect(GCCause::Cause cause, bool retry_on_vmop_failure) {
+bool G1CollectedHeap::attempt_collect(GCCause::Cause cause, bool retry_on_gc_failure) {
   assert_heap_not_locked();
 
-  bool vmop_succeeded;
-  bool should_retry_vmop;
+  bool gc_succeeded;
+  bool should_retry_gc;
 
   do {
-    should_retry_vmop = false;
+    should_retry_gc = false;
 
     uint gc_count_before;
     uint old_marking_count_before;
     uint full_gc_count_before;
 

@@ -2146,21 +2146,21 @@
                                    gc_count_before,
                                    cause,
                                    true,  /* should_initiate_conc_mark */
                                    g1_policy()->max_pause_time_ms());
       VMThread::execute(&op);
-      vmop_succeeded = op.pause_succeeded();
-      if (!vmop_succeeded && retry_on_vmop_failure) {
+      gc_succeeded = op.gc_succeeded();
+      if (!gc_succeeded && retry_on_gc_failure) {
         if (old_marking_count_before == _old_marking_cycles_started) {
-          should_retry_vmop = op.should_retry_gc();
+          should_retry_gc = op.should_retry_gc();
         } else {
           // A Full GC happened while we were trying to schedule the
           // concurrent cycle. No point in starting a new cycle given
           // that the whole heap was collected anyway.
         }
 
-        if (should_retry_vmop && GCLocker::is_active_and_needs_gc()) {
+        if (should_retry_gc && GCLocker::is_active_and_needs_gc()) {
           GCLocker::stall_until_clear();
         }
       }
     } else {
       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc

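For context on the stall above: GCLocker is active while any thread sits inside a JNI critical region, and stall_until_clear() blocks the requesting thread until the last such region exits, after which should_retry_gc re-drives the loop. A minimal sketch of the mutator-side pattern that activates GCLocker (standard JNI usage, not part of this patch; env and arr are illustrative):

    #include <jni.h>

    static void touch_array(JNIEnv* env, jintArray arr) {
      jint* p = (jint*) env->GetPrimitiveArrayCritical(arr, NULL);
      if (p != NULL) {
        // GCLocker::is_active() is true in this window; a pause
        // requested now is refused and a GC is recorded as needed.
        p[0] = 42;
        env->ReleasePrimitiveArrayCritical(arr, p, 0);
      }
    }
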
@@ -2172,20 +2172,20 @@
                                      gc_count_before,
                                      cause,
                                      false, /* should_initiate_conc_mark */
                                      g1_policy()->max_pause_time_ms());
         VMThread::execute(&op);
-        vmop_succeeded = op.pause_succeeded();
+        gc_succeeded = op.gc_succeeded();
       } else {
         // Schedule a Full GC.
         VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
         VMThread::execute(&op);
-        vmop_succeeded = op.pause_succeeded();
+        gc_succeeded = op.gc_succeeded();
       }
     }
-  } while (should_retry_vmop);
-  return vmop_succeeded;
+  } while (should_retry_gc);
+  return gc_succeeded;
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
   if (_hrm->reserved().contains(p)) {
     // Given that we know that p is in the reserved space,

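To illustrate the renamed retry_on_gc_failure parameter, a hedged sketch of a caller that must not stall behind GCLocker; the function name and the use of GCCause::_g1_periodic_collection here are illustrative assumptions, not part of this webrev:

    // Hypothetical periodic trigger: pass retry_on_gc_failure = false
    // so an aborted pause is skipped instead of retried.
    static void try_periodic_gc(G1CollectedHeap* g1h) {
      bool gc_succeeded =
          g1h->attempt_collect(GCCause::_g1_periodic_collection,
                               false /* retry_on_gc_failure */);
      if (!gc_succeeded) {
        // The GC did not run (e.g. GCLocker was active); skip this
        // interval instead of stalling.
      }
    }

Explicit requests keep the retrying behavior through collect(), which passes true for retry_on_gc_failure.
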
@@ -2592,11 +2592,11 @@
                                false, /* should_initiate_conc_mark */
                                g1_policy()->max_pause_time_ms());
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
-  bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
+  bool ret_succeeded = op.prologue_succeeded() && op.gc_succeeded();
   assert(result == NULL || ret_succeeded,
          "the result should be NULL if the VM did not succeed");
   *succeeded = ret_succeeded;
 
   assert_heap_not_locked();
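
The combined check above distinguishes two failure modes. A hypothetical helper making the assumed semantics explicit (the helper name is illustrative, and the op type follows the VM_G1CollectForAllocation operation used by this allocation path):

    //  - prologue_succeeded(): the VM operation ran at a safepoint and
    //    was not skipped (e.g. because another GC already served this
    //    gc_count_before).
    //  - gc_succeeded(): the pause completed rather than being
    //    aborted, e.g. by an active GCLocker.
    static bool pause_really_ran(const VM_G1CollectForAllocation& op) {
      return op.prologue_succeeded() && op.gc_succeeded();
    }

Only when both hold may op.result() be non-NULL, which the assert above enforces.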