
src/share/vm/gc_implementation/g1/g1Allocator.hpp

rev 7903 : [mq]: 8073052-kim-sangheon-stefanj-changes

@@ -28,23 +28,20 @@
 #include "gc_implementation/g1/g1AllocationContext.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1InCSetState.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
 
-// Base class for G1 allocators.
+// Interface to keep track of which regions G1 is currently allocating into and
+// to provide access to them (e.g. for allocating into them, or getting their occupancy).
+// Also keeps track of retained regions across GCs.
 class G1Allocator : public CHeapObj<mtGC> {
   friend class VMStructs;
 protected:
   G1CollectedHeap* _g1h;
 
-  // Outside of GC pauses, the number of bytes used in all regions other
-  // than the current allocation region.
-  size_t _summary_bytes_used;
-
 public:
-   G1Allocator(G1CollectedHeap* heap) :
-     _g1h(heap), _summary_bytes_used(0) { }
+  G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
 
    static G1Allocator* create_allocator(G1CollectedHeap* g1h);
 
    virtual void init_mutator_alloc_region() = 0;
    virtual void release_mutator_alloc_region() = 0;

@@ -54,44 +51,23 @@
    virtual void abandon_gc_alloc_regions() = 0;
 
    virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
    virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
    virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
-   virtual size_t                 used() = 0;
-   virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;
 
+  virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;
    void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                                             OldGCAllocRegion* old,
                                                             HeapRegion** retained);
 
-   size_t used_unlocked() const {
-     return _summary_bytes_used;
-   }
-
-   void increase_used(size_t bytes) {
-     _summary_bytes_used += bytes;
-   }
-
-   void decrease_used(size_t bytes) {
-     assert(_summary_bytes_used >= bytes,
-            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
-                _summary_bytes_used, bytes));
-     _summary_bytes_used -= bytes;
-   }
-
-   void set_used(size_t bytes) {
-     _summary_bytes_used = bytes;
-   }
-
-   virtual HeapRegion* new_heap_region(uint hrs_index,
-                                       G1BlockOffsetSharedArray* sharedOffsetArray,
-                                       MemRegion mr) {
-     return new HeapRegion(hrs_index, sharedOffsetArray, mr);
-   }
+  // Returns the amount of memory that is in use by the managed allocation regions.
+  virtual size_t                 used_in_alloc_regions() const = 0;
 };
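
A sketch (not part of this change) of how a caller might now compute total usage, assuming a heap-side summary counter replaces the removed _summary_bytes_used; the name summary_bytes_used below is illustrative:

  // Sketch only: combine a heap-side summary counter with the bytes currently
  // handed out by the active allocation regions. The caller is expected to hold
  // the Heap_lock, since G1DefaultAllocator::used_in_alloc_regions() asserts it.
  size_t heap_used(size_t summary_bytes_used, const G1Allocator* allocator) {
    return summary_bytes_used + allocator->used_in_alloc_regions();
  }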
 
-// The default allocator for G1.
+// The default allocation region manager for G1. Provides a single mutator, survivor
+// and old generation allocation region.
+// Can retain the old generation allocation region across GCs.
 class G1DefaultAllocator : public G1Allocator {
 protected:
   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;
 

@@ -128,31 +104,32 @@
 
   virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
     return &_old_gc_alloc_region;
   }
 
-  virtual size_t used() {
+  virtual size_t used_in_alloc_regions() const {
     assert(Heap_lock->owner() != NULL,
            "Should be owned on this thread's behalf.");
-    size_t result = _summary_bytes_used;
+    size_t result = 0;
 
     // Read only once in case it is set to NULL concurrently
-    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
+    HeapRegion* hr = _mutator_alloc_region.get();
     if (hr != NULL) {
       result += hr->used();
     }
     return result;
   }
 };
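
The local copy of the region pointer matters here: as the comment notes, the mutator alloc region can be set to NULL concurrently, so the NULL check and the use have to operate on the same snapshot. A reduced sketch of the pattern:

  // Load-once pattern: read the shared pointer a single time, then test and use
  // only the local snapshot.
  size_t current_region_bytes(MutatorAllocRegion* region) {
    HeapRegion* hr = region->get();        // single read of the shared pointer
    return (hr != NULL) ? hr->used() : 0;  // check and use agree on one value
  }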
 
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
+// A G1-specific PLAB used during garbage collection.
+class G1PLAB: public ParGCAllocBuffer {
 private:
   bool _retired;
 
 public:
-  G1ParGCAllocBuffer(size_t gclab_word_size);
-  virtual ~G1ParGCAllocBuffer() {
+  G1PLAB(size_t gclab_word_size);
+  virtual ~G1PLAB() {
     guarantee(_retired, "Allocation buffer has not been retired");
   }
 
   virtual void set_buf(HeapWord* buf) {
     ParGCAllocBuffer::set_buf(buf);

@@ -166,11 +143,14 @@
     ParGCAllocBuffer::retire();
     _retired = true;
   }
 };
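
The _retired flag makes a forgotten retire() fail loudly at destruction. A minimal lifecycle sketch (buffer sizing and the call site are illustrative, not taken from this patch):

  void g1plab_lifecycle_sketch(size_t gclab_word_size, HeapWord* buf, size_t word_sz) {
    G1PLAB plab(gclab_word_size);
    plab.set_buf(buf);                        // install a fresh buffer (presumably clears _retired)
    HeapWord* obj = plab.allocate(word_sz);   // bump-pointer allocation inside the PLAB
    if (obj != NULL) {
      // ... copy an object into obj ...
    }
    plab.retire();                            // must run before the destructor,
                                              // otherwise guarantee(_retired, ...) fires
  }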
 
-class G1ParGCAllocator : public CHeapObj<mtGC> {
+// Manages the PLABs used during garbage collection and provides the interface for
+// allocating from them. Needs to handle multiple allocation contexts, the extra
+// alignment required in the "survivor" area, and waste statistics.
+class G1PLABAllocator : public CHeapObj<mtGC> {
   friend class G1ParScanThreadState;
 protected:
   G1CollectedHeap* _g1h;
 
   // The survivor alignment in effect in bytes.

@@ -185,11 +165,11 @@
 
   void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
   void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 
   virtual void retire_alloc_buffers() = 0;
-  virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
+  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
 
   // Calculate the survivor space object alignment in bytes. Returns that or 0 if
   // there are no restrictions on survivor alignment.
   static uint calc_survivor_alignment_bytes() {
     assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");

@@ -202,16 +182,16 @@
       return SurvivorAlignmentInBytes;
     }
   }
 
 public:
-  G1ParGCAllocator(G1CollectedHeap* g1h) :
+  G1PLABAllocator(G1CollectedHeap* g1h) :
     _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
     _alloc_buffer_waste(0), _undo_waste(0) {
   }
 
-  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
+  static G1PLABAllocator* create_allocator(G1CollectedHeap* g1h);
 
   size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
   size_t undo_waste() {return _undo_waste; }
 
   // Allocate word_sz words in dest, either directly into the regions or by

@@ -224,11 +204,11 @@
   // Allocate word_sz words in the PLAB of dest.  Returns the address of the
   // allocated memory, NULL if not successful.
   HeapWord* plab_allocate(InCSetState dest,
                           size_t word_sz,
                           AllocationContext_t context) {
-    G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
+    G1PLAB* buffer = alloc_buffer(dest, context);
     if (_survivor_alignment_bytes == 0) {
       return buffer->allocate(word_sz);
     } else {
       return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
     }

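When a survivor alignment is in effect, allocate_aligned() has to bump the PLAB cursor up to the next alignment boundary before placing the object. The arithmetic is a plain align-up; a standalone illustration (generic C++, not HotSpot code):

  #include <cstddef>
  #include <cstdint>

  // Align 'cursor' up to the next multiple of 'alignment_bytes' (a power of two).
  inline uintptr_t align_up(uintptr_t cursor, size_t alignment_bytes) {
    return (cursor + alignment_bytes - 1) & ~(uintptr_t)(alignment_bytes - 1);
  }
  // Example: cursor 0x1008 with a 64-byte survivor alignment yields 0x1040; the
  // 56-byte gap is skipped (typically plugged with a filler object) and ends up
  // as wasted space inside the PLAB.
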
@@ -253,19 +233,21 @@
       add_to_undo_waste(word_sz);
     }
   }
 };
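
The undo path above handles a worker that copies an object into its PLAB and then abandons the copy, typically because it lost the forwarding race: the space is handed back to the buffer when possible, otherwise counted as undo waste. A sketch of the calling pattern (undo_allocation's exact signature is not visible in this excerpt and is assumed here):

  // Sketch: a per-object evacuation attempt against a G1PLABAllocator.
  void evacuate_sketch(G1PLABAllocator* pa, InCSetState dest, size_t word_sz,
                       AllocationContext_t context, bool lost_forwarding_race) {
    HeapWord* dest_addr = pa->plab_allocate(dest, word_sz, context);
    if (dest_addr != NULL && lost_forwarding_race) {
      // Another worker installed the forwardee first; abandon this copy.
      pa->undo_allocation(dest, dest_addr, word_sz, context);  // assumed signature
    }
  }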
 
-class G1DefaultParGCAllocator : public G1ParGCAllocator {
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
+// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
+// and old generation allocation.
+class G1DefaultPLABAllocator : public G1PLABAllocator {
+  G1PLAB  _surviving_alloc_buffer;
+  G1PLAB  _tenured_alloc_buffer;
+  G1PLAB* _alloc_buffers[InCSetState::Num];
 
 public:
-  G1DefaultParGCAllocator(G1CollectedHeap* g1h);
+  G1DefaultPLABAllocator(G1CollectedHeap* g1h);
 
-  virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
+  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
     assert(dest.is_valid(),
            err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
     assert(_alloc_buffers[dest.value()] != NULL,
            err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
     return _alloc_buffers[dest.value()];
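
The destination state doubles as an index into _alloc_buffers, so the lookup is an asserted array access. The wiring of the two concrete PLABs happens in the constructor in g1Allocator.cpp (not shown here), presumably along these lines:

  // Presumed constructor wiring (illustrative; the real code is in g1Allocator.cpp):
  //   _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
  //   _alloc_buffers[InCSetState::Old]   = &_tenured_alloc_buffer;
  // so survivor-bound and old-bound copies each use their own single PLAB per worker.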