src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 7780 : imported patch 8072621

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -323,15 +323,15 @@
   // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have started.
-  volatile unsigned int _old_marking_cycles_started;
+  volatile uint _old_marking_cycles_started;
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have completed.
-  volatile unsigned int _old_marking_cycles_completed;
+  volatile uint _old_marking_cycles_completed;
 
   bool _concurrent_cycle_started;
   bool _heap_summary_sent;
 
   // This is a non-product method that is helpful for testing. It is

@@ -495,26 +495,26 @@
 
   // First-level mutator allocation attempt: try to allocate out of
   // the mutator alloc region without taking the Heap_lock. This
   // should only be used for non-humongous allocations.
   inline HeapWord* attempt_allocation(size_t word_size,
-                                      unsigned int* gc_count_before_ret,
-                                      int* gclocker_retry_count_ret);
+                                      uint* gc_count_before_ret,
+                                      uint* gclocker_retry_count_ret);
 
   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
                                     AllocationContext_t context,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret);
+                                    uint* gc_count_before_ret,
+                                    uint* gclocker_retry_count_ret);
 
   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.
   HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         unsigned int* gc_count_before_ret,
-                                         int* gclocker_retry_count_ret);
+                                         uint* gc_count_before_ret,
+                                         uint* gclocker_retry_count_ret);
 
   // Allocation attempt that should be called during safepoints (e.g.,
   // at the end of a successful GC). expect_null_mutator_alloc_region
   // specifies whether the mutator alloc region is expected to be NULL
   // or not.

@@ -684,11 +684,11 @@
   // the FullGCCount_lock in case a Java thread is waiting for a full
   // GC to happen (e.g., it called System.gc() with
   // +ExplicitGCInvokesConcurrent).
   void increment_old_marking_cycles_completed(bool concurrent);
 
-  unsigned int old_marking_cycles_completed() {
+  uint old_marking_cycles_completed() {
     return _old_marking_cycles_completed;
   }
 
   void register_concurrent_cycle_start(const Ticks& start_time);
   void register_concurrent_cycle_end();

@@ -743,11 +743,11 @@
   // gc_count_before (i.e., total_collections()) as a parameter since
   // it has to be read while holding the Heap_lock. Currently, both
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
   HeapWord* do_collection_pause(size_t         word_size,
-                                unsigned int   gc_count_before,
+                                uint           gc_count_before,
                                 bool*          succeeded,
                                 GCCause::Cause gc_cause);
 
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due

@@ -979,11 +979,11 @@
   // Time stamp to validate the regions recorded in the cache
   // used by G1CollectedHeap::start_cset_region_for_worker().
   // The heap region entry for a given worker is valid iff
   // the associated time stamp value matches the current value
   // of G1CollectedHeap::_gc_time_stamp.
-  unsigned int* _worker_cset_start_region_time_stamp;
+  uint* _worker_cset_start_region_time_stamp;
 
   enum G1H_process_roots_tasks {
     G1H_PS_filter_satb_buffers,
     G1H_PS_refProcessor_oops_do,
     // Leave this one last.