/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"

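// The purposes for which the GC allocates space while copying objects
// during an evacuation pause: old (tenured) destinations and survivor
// destinations.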
enum GCAllocPurpose {
  GCAllocForTenured,
  GCAllocForSurvived,
  GCAllocPurposeCount
};

// Base class for G1 allocators. An allocator manages the mutator and GC
// allocation regions and tracks the number of bytes used outside of the
// current allocation region.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

public:
  G1Allocator(G1CollectedHeap* heap) :
    _g1h(heap), _summary_bytes_used(0) { }

  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

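  // Set up and tear down the mutator allocation region (used outside of
  // GC pauses) and the GC allocation regions used to copy survivor and
  // old objects during an evacuation pause.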
  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
  virtual size_t                 used() = 0;
  virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;

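  // Reinstate the old GC alloc region retained at the end of the previous
  // evacuation pause, if any, as the initial old GC alloc region for the
  // current pause so that its remaining free space is not wasted.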
  void reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                 OldGCAllocRegion* old,
                                 HeapRegion** retained);

  size_t used_unlocked() const {
    return _summary_bytes_used;
  }

  void increase_used(size_t bytes) {
    _summary_bytes_used += bytes;
  }

  void decrease_used(size_t bytes) {
    assert(_summary_bytes_used >= bytes,
           err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
                   _summary_bytes_used, bytes));
    _summary_bytes_used -= bytes;
  }

  void set_used(size_t bytes) {
    _summary_bytes_used = bytes;
  }

  virtual HeapRegion* new_heap_region(uint hrs_index,
                                      G1BlockOffsetSharedArray* sharedOffsetArray,
                                      MemRegion mr) {
    return new HeapRegion(hrs_index, sharedOffsetArray, mr);
  }
};

// The default allocator for G1: a single mutator, survivor and old
// allocation region per heap, independent of the allocation context.
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

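  // The old GC alloc region retained at the end of the previous evacuation
  // pause, if any; see reuse_retained_old_region().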
  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  virtual size_t used() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = _summary_bytes_used;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

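// A ParGCAllocBuffer that tracks whether it has been retired, so that
// retire() is applied at most once and the destructor can verify the
// buffer was flushed before being destroyed.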
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  bool _retired;

public:
  G1ParGCAllocBuffer(size_t gclab_word_size);
  virtual ~G1ParGCAllocBuffer() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  virtual void retire(bool end_of_gc, bool retain) {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};

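// Per-worker-thread allocator used during an evacuation pause. Satisfies
// copy allocations out of per-purpose allocation buffers (PLABs), falling
// back to a slow path when a request does not fit in the current buffer,
// and tracks buffer and undo waste.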
class G1ParGCAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;

  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

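  // Slow path for allocate(): taken when the request does not fit in the
  // current allocation buffer for the given purpose.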
  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);

  virtual void retire_alloc_buffers() = 0;
  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;

public:
  G1ParGCAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
  }

  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste()         { return _undo_waste; }

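  // Allocate word_sz words from the allocation buffer for the given
  // purpose, applying the survivor alignment for survivor copies, and fall
  // back to the slow path if the buffer cannot satisfy the request.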
  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
    HeapWord* obj = NULL;
    if (purpose == GCAllocForSurvived) {
      obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
    } else {
      obj = alloc_buffer(purpose, context)->allocate(word_sz);
    }
    if (obj != NULL) {
      return obj;
    }
    return allocate_slow(purpose, word_sz, context);
  }

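  // Undo the most recent allocation of obj. If obj still lies in its
  // allocation buffer the buffer is simply rewound; otherwise the space is
  // filled with a dummy object and counted as undo waste.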
  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    if (alloc_buffer(purpose, context)->contains(obj)) {
      assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
};

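// The default G1ParGCAllocator: one survivor and one tenured allocation
// buffer per worker thread, selected by purpose and shared across all
// allocation contexts.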
class G1DefaultParGCAllocator : public G1ParGCAllocator {
  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];

public:
  G1DefaultParGCAllocator(G1CollectedHeap* g1h);

  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
    return _alloc_buffers[purpose];
  }

  virtual void retire_alloc_buffers();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP