/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"

// Interface to keep track of which regions G1 is currently allocating into,
// and to provide access to them (e.g. allocating into them, or getting their
// occupancy). Also keeps track of retained regions across GCs.
// See the illustrative usage sketch after this class.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

public:
  G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }

  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;

  virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;
  void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                                           OldGCAllocRegion* old,
                                                           HeapRegion** retained);

  // Returns the amount of memory that is in use by the managed allocation regions.
  virtual size_t                 used_in_alloc_regions() const = 0;
};
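
// Illustrative usage sketch (not part of this interface): the rough order in
// which a collector-side caller is expected to drive these hooks around an
// evacuation pause. The surrounding control flow and the g1h, evacuation_info
// and n_workers names are assumptions; only the G1Allocator calls themselves
// are declared above.
//
//   G1Allocator* allocator = G1Allocator::create_allocator(g1h);
//   allocator->init_mutator_alloc_region();             // outside of pauses
//
//   // At the start of an evacuation pause:
//   allocator->release_mutator_alloc_region();
//   allocator->init_gc_alloc_regions(evacuation_info);
//
//   // At the end of the pause (n_workers = number of GC workers used):
//   allocator->release_gc_alloc_regions(n_workers, evacuation_info);
//   allocator->init_mutator_alloc_region();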

// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  // The old generation allocation region retained at the end of a GC so that
  // old generation allocation can continue into it during the next GC.
  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  virtual size_t used_in_alloc_regions() const {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = 0;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = _mutator_alloc_region.get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};
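
// Illustrative sketch (a hedged approximation, not the actual definition,
// which lives in g1Allocator.cpp): init_gc_alloc_regions() for this default
// allocator would plausibly initialize both GC alloc regions (assuming the
// G1AllocRegion subclasses expose init()) and then hand the retained old
// region back via the reuse_retained_old_region() helper declared in
// G1Allocator:
//
//   void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
//     _survivor_gc_alloc_region.init();
//     _old_gc_alloc_region.init();
//     reuse_retained_old_region(evacuation_info,
//                               &_old_gc_alloc_region,
//                               &_retained_old_gc_alloc_region);
//   }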

// A PLAB used during garbage collection that is specific to G1.
class G1PLAB: public ParGCAllocBuffer {
private:
  bool _retired;

public:
  G1PLAB(size_t gclab_word_size);
  virtual ~G1PLAB() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  virtual void retire() {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire();
    _retired = true;
  }
};
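
// Illustrative sketch (not part of this class): the intended G1PLAB life
// cycle. The size and buffer names below are hypothetical; the point is that
// set_buf() arms the buffer (clearing _retired) and retire() must run before
// the destructor's guarantee is checked.
//
//   G1PLAB plab(plab_word_size);
//   plab.set_buf(bottom_of_new_buffer);       // _retired becomes false
//   HeapWord* obj = plab.allocate(word_sz);   // inherited from ParGCAllocBuffer
//   ...
//   plab.retire();                            // idempotent; satisfies ~G1PLAB()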

// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

  // Number of words wasted in allocation buffers, e.g. space left unused when
  // a buffer needs to be retired or replaced.
  size_t _alloc_buffer_waste;
  // Number of words wasted by undone allocations that could not be returned to
  // their buffer and were filled with dummy objects instead.
  size_t _undo_waste;

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  virtual void retire_alloc_buffers() = 0;
  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }

public:
  G1PLABAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
    _alloc_buffer_waste(0), _undo_waste(0) {
  }

  static G1PLABAllocator* create_allocator(G1CollectedHeap* g1h);

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste() { return _undo_waste; }

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
                                        size_t word_sz,
                                        AllocationContext_t context);

  // Allocate word_sz words in the PLAB of dest.  Returns the address of the
  // allocated memory, NULL if not successful.
  HeapWord* plab_allocate(InCSetState dest,
                          size_t word_sz,
                          AllocationContext_t context) {
    G1PLAB* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }

  HeapWord* allocate(InCSetState dest, size_t word_sz,
                     AllocationContext_t context) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context);
  }

  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    if (alloc_buffer(dest, context)->contains(obj)) {
      assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
};
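
// Illustrative sketch (hedged; the names _plab_allocator,
// handle_evacuation_failure, dest, old_obj, word_sz and lost_race are
// hypothetical placeholders): how a per-thread scanner such as
// G1ParScanThreadState might use this interface when copying an object
// during evacuation.
//
//   HeapWord* obj_ptr = _plab_allocator->allocate(dest, word_sz, context);
//   if (obj_ptr == NULL) {
//     // Neither the current PLAB, a new PLAB, nor a direct allocation worked.
//     return handle_evacuation_failure(old_obj);
//   }
//   // ... copy old_obj into obj_ptr, then try to install the forwarding
//   // pointer; if another worker won that race, give the space back:
//   if (lost_race) {
//     _plab_allocator->undo_allocation(dest, obj_ptr, word_sz, context);
//   }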

// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
  G1PLAB  _surviving_alloc_buffer;
  G1PLAB  _tenured_alloc_buffer;
  G1PLAB* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultPLABAllocator(G1CollectedHeap* g1h);

  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
    assert(_alloc_buffers[dest.value()] != NULL,
           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
    return _alloc_buffers[dest.value()];
  }

  virtual void retire_alloc_buffers();
};
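
// Illustrative sketch (hedged approximation of the constructor defined in
// g1Allocator.cpp, assuming InCSetState provides Young, Old and Num values,
// see g1InCSetState.hpp): how the state-indexed table above could be wired
// to the two buffers.
//
//   for (uint state = 0; state < InCSetState::Num; state++) {
//     _alloc_buffers[state] = NULL;
//   }
//   _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
//   _alloc_buffers[InCSetState::Old]   = &_tenured_alloc_buffer;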

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP