src/share/vm/gc/g1/g1CollectedHeap.cpp

@@ -599,11 +599,11 @@
       result = _allocator->attempt_allocation_locked(word_size, context);
       if (result != NULL) {
         return result;
       }
 
-      if (GC_locker::is_active_and_needs_gc()) {
+      if (GCLocker::is_active_and_needs_gc()) {
         if (g1_policy()->can_expand_young_list()) {
           // No need for an ergo verbose message here,
           // can_expand_young_list() does this when it returns true.
           result = _allocator->attempt_allocation_force(word_size, context);
           if (result != NULL) {

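For context on this hunk: when the GCLocker is active and a young allocation has failed, G1 prefers to force-expand the young list over stalling the mutator. Below is a minimal standalone sketch of that decision, not HotSpot code; the struct and its members are illustrative stand-ins for GCLocker::is_active_and_needs_gc(), can_expand_young_list(), and attempt_allocation_force().

    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-ins only; not HotSpot code.
    struct AllocSketch {
      bool gclocker_active_and_needs_gc;  // ~ GCLocker::is_active_and_needs_gc()
      bool can_expand_young_list;         // ~ g1_policy()->can_expand_young_list()

      void* attempt_allocation_force(size_t /*word_size*/) { return this; }

      void* allocate_slow(size_t word_size) {
        if (gclocker_active_and_needs_gc && can_expand_young_list) {
          // GC is held off by a JNI critical section; grow the young list
          // instead of making the mutator wait for the locker to clear.
          return attempt_allocation_force(word_size);
        }
        return nullptr;  // caller decides whether to stall or schedule a GC
      }
    };

    int main() {
      AllocSketch s{true, true};
      std::printf("forced allocation %s\n",
                  s.allocate_slow(16) ? "succeeded" : "failed");
    }
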
@@ -615,11 +615,11 @@
         // The GCLocker may not be active but the GCLocker initiated
         // GC may not yet have been performed (GCLocker::needs_gc()
         // returns true). In this case we do not try this GC and
         // wait until the GCLocker initiated GC is performed, and
         // then retry the allocation.
-        if (GC_locker::needs_gc()) {
+        if (GCLocker::needs_gc()) {
           should_try_gc = false;
         } else {
           // Read the GC count while still holding the Heap_lock.
           gc_count_before = total_collections();
           should_try_gc = true;

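The "read the GC count while still holding the Heap_lock" idiom above deserves a note: the count sampled under the lock is later handed to the GC request, and a mismatch with the then-current count means another thread already ran the GC in the meantime. A minimal sketch of the idiom, with std::mutex standing in for Heap_lock and all names illustrative:

    #include <cstdio>
    #include <mutex>

    // std::mutex stands in for Heap_lock; names are illustrative.
    struct HeapSketch {
      std::mutex heap_lock;
      unsigned total_collections = 0;

      unsigned read_gc_count_for_retry() {
        std::lock_guard<std::mutex> g(heap_lock);
        // Sampled under the same lock that serializes GC scheduling, so a
        // later mismatch proves a GC ran between the sample and the request.
        return total_collections;
      }
    };

    int main() {
      HeapSketch h;
      unsigned gc_count_before = h.read_gc_count_for_retry();
      // ... elsewhere a GC would bump total_collections ...
      bool gc_already_ran = (h.total_collections != gc_count_before);
      std::printf("gc already ran: %d\n", gc_already_ran);
    }
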
@@ -651,11 +651,11 @@
         return NULL;
       }
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
-      GC_locker::stall_until_clear();
+      GCLocker::stall_until_clear();
       (*gclocker_retry_count_ret) += 1;
     }
 
     // We can reach here if we were unsuccessful in scheduling a
     // collection (because another thread beat us to it) or if we were

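The stall path above pairs GCLocker::stall_until_clear() with a retry counter so the caller can give up after a bound. A self-contained sketch of the stall/notify handshake, using a condition variable as a stand-in for HotSpot's internal synchronization (all names illustrative):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    // Illustrative stand-in for the GCLocker stall/clear handshake.
    class GCLockerSketch {
      std::mutex mu_;
      std::condition_variable cv_;
      bool needs_gc_ = true;  // a locker-initiated GC is still pending
    public:
      void stall_until_clear() {            // ~ GCLocker::stall_until_clear()
        std::unique_lock<std::mutex> lk(mu_);
        cv_.wait(lk, [this] { return !needs_gc_; });
      }
      void gc_performed() {                 // called once the pending GC ran
        { std::lock_guard<std::mutex> lk(mu_); needs_gc_ = false; }
        cv_.notify_all();
      }
    };

    int main() {
      GCLockerSketch locker;
      int gclocker_retry_count = 0;
      std::thread gc([&] { locker.gc_performed(); });  // pretend the GC runs
      locker.stall_until_clear();  // allocation path blocks here ...
      ++gclocker_retry_count;      // ... then bumps the counter and retries
      gc.join();
      return gclocker_retry_count == 1 ? 0 : 1;
    }
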
@@ -1026,19 +1026,19 @@
         size_t size_in_regions = humongous_obj_size_in_regions(word_size);
         g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
         return result;
       }
 
-      if (GC_locker::is_active_and_needs_gc()) {
+      if (GCLocker::is_active_and_needs_gc()) {
         should_try_gc = false;
       } else {
        // The GCLocker may not be active but the GCLocker initiated
         // GC may not yet have been performed (GCLocker::needs_gc()
         // returns true). In this case we do not try this GC and
         // wait until the GCLocker initiated GC is performed, and
         // then retry the allocation.
-        if (GC_locker::needs_gc()) {
+        if (GCLocker::needs_gc()) {
           should_try_gc = false;
         } else {
           // Read the GC count while still holding the Heap_lock.
           gc_count_before = total_collections();
           should_try_gc = true;

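The accounting at the top of this hunk charges a humongous allocation to the old gen as whole regions: the object's word size is rounded up to a region count, then multiplied by the region size. A small sketch of that arithmetic, assuming an illustrative 1 MB region size in place of HeapRegion::GrainBytes:

    #include <cstddef>
    #include <cstdio>

    // 1 MB is an assumed region size for illustration (G1's real value is
    // HeapRegion::GrainBytes, which varies with heap size).
    static const size_t kGrainBytes = 1024 * 1024;
    static const size_t kGrainWords = kGrainBytes / sizeof(void*);

    static size_t humongous_obj_size_in_regions(size_t word_size) {
      return (word_size + kGrainWords - 1) / kGrainWords;  // round up
    }

    int main() {
      size_t word_size = 3 * kGrainWords / 2;  // 1.5 regions' worth of words
      size_t size_in_regions = humongous_obj_size_in_regions(word_size);  // 2
      std::printf("bytes charged to old gen: %zu\n",
                  size_in_regions * kGrainBytes);
    }
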
@@ -1074,11 +1074,11 @@
         return NULL;
       }
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
-      GC_locker::stall_until_clear();
+      GCLocker::stall_until_clear();
       (*gclocker_retry_count_ret) += 1;
     }
 
     // We can reach here if we were unsuccessful in scheduling a
     // collection (because another thread beat us to it) or if we were

@@ -1209,11 +1209,11 @@
 
 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
                                          bool clear_all_soft_refs) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
-  if (GC_locker::check_active_before_gc()) {
+  if (GCLocker::check_active_before_gc()) {
     return false;
   }
 
   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
   gc_timer->register_gc_start();

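check_active_before_gc() is the GC-side half of the GCLocker protocol: if any thread is inside a JNI critical section when the VM thread reaches the collection, the GC is abandoned and flagged to run once the last critical section exits. A minimal sketch of that bail-out, with illustrative stand-in names:

    #include <cstdio>

    // Illustrative stand-in names; not HotSpot code.
    struct LockerSketch {
      int  critical_count = 0;  // threads currently in JNI critical sections
      bool needs_gc = false;

      bool check_active_before_gc() {  // called at a safepoint, pre-GC
        if (critical_count > 0) {
          needs_gc = true;   // arrange a GC for when the locker clears
          return true;       // tell the collector to bail out
        }
        return false;
      }
    };

    static bool do_full_collection_sketch(LockerSketch& locker) {
      if (locker.check_active_before_gc()) {
        return false;        // collection skipped; retried after the stall
      }
      // ... the actual collection would run here ...
      return true;
    }

    int main() {
      LockerSketch locker{1, false};
      std::printf("collected: %d, needs_gc later: %d\n",
                  do_full_collection_sketch(locker), locker.needs_gc);
    }
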
@@ -2394,12 +2394,12 @@
           // initial-mark GC. No point in starting a new cycle given
           // that the whole heap was collected anyway.
         }
 
         if (retry_gc) {
-          if (GC_locker::is_active_and_needs_gc()) {
-            GC_locker::stall_until_clear();
+          if (GCLocker::is_active_and_needs_gc()) {
+            GCLocker::stall_until_clear();
           }
         }
       }
     } else {
       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc

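The retry path in this hunk stalls on the GCLocker before re-requesting a collection cycle, so the retry runs after the locker-initiated GC instead of failing again immediately. A compact sketch of that control flow (names and the try_schedule_cycle() helper are hypothetical):

    #include <cstdio>

    // Illustrative stand-ins; try_schedule_cycle() is hypothetical.
    struct LockerState {
      bool active_and_needs_gc = true;
      void stall_until_clear() { active_and_needs_gc = false; }  // pretend to block
    };

    static bool try_schedule_cycle(int attempt) { return attempt > 0; }

    int main() {
      LockerState locker;
      bool retry_gc = !try_schedule_cycle(0);  // first request could not start
      if (retry_gc) {
        if (locker.active_and_needs_gc) {
          // Wait for the locker-initiated GC rather than retrying straight
          // into the same failure.
          locker.stall_until_clear();
        }
        std::printf("retry scheduled: %d\n", try_schedule_cycle(1));
      }
    }
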
@@ -3627,11 +3627,11 @@
 bool
 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   assert_at_safepoint(true /* should_be_vm_thread */);
   guarantee(!is_gc_active(), "collection is not reentrant");
 
-  if (GC_locker::check_active_before_gc()) {
+  if (GCLocker::check_active_before_gc()) {
     return false;
   }
 
   _gc_timer_stw->register_gc_start();
 