/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"

// Interface to keep track of which regions G1 is currently allocating into and
// to allow access to them (e.g. allocating into them, or getting their occupancy).
// Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
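  // The heap this allocator is associated with.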
  G1CollectedHeap* _g1h;

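  // Accessors for the allocation regions currently in use for the given
  // allocation context.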
  virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t min_word_size,
                                               size_t& word_size,
                                               AllocationContext_t context);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t min_word_size,
                                          size_t& word_size,
                                          AllocationContext_t context);

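  // If an old GC allocation region was retained from the previous collection
  // and is still usable, install it as the current old GC allocation region.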
  void reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                 OldGCAllocRegion* old,
                                 HeapRegion** retained);

 public:
  G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }

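  // Factory method for the G1Allocator implementation to use.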
  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

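  // Management of the region used to satisfy mutator allocation requests;
  // it is released around collection pauses and (re)initialized afterwards.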
  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

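  // Management of the survivor and old GC allocation regions used during
  // evacuation; they are set up at the start of a collection and either
  // released (possibly retaining the old region) or abandoned afterwards.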
  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

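  // PLAB sizing: the desired PLAB size for allocations into dest, and the
  // evacuation statistics it is derived from.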
  size_t desired_plab_size(InCSetState dest);
  virtual G1EvacStats* evac_stats(InCSetState dest) = 0;

  // Allocate blocks during garbage collection. Ensures that an allocation
  // region is available, either by reusing the current one or by getting a new
  // one from the heap, and then allocates a block of the given size. The block
  // must not be humongous: it has to fit into a single heap region.
  HeapWord* par_allocate_during_gc(InCSetState dest,
                                   size_t min_word_size,
                                   size_t& word_size,
                                   AllocationContext_t context);

  HeapWord* par_allocate_during_gc(InCSetState dest, size_t word_size, AllocationContext_t context) {
    return par_allocate_during_gc(dest, word_size, word_size, context);
  }

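  // Mutator allocation: allocate word_size words out of the current mutator
  // allocation region. The _locked variant may also retire the current region
  // and replace it with a new one, and the _force variant forces the use of a
  // new allocation region.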
  HeapWord* par_allocate_during_mutator(size_t word_size, bool bot_updates, AllocationContext_t context) {
    return mutator_alloc_region(context)->attempt_allocation(word_size, bot_updates);
  }

  HeapWord* par_allocate_during_mutator_locked(size_t word_size, bool bot_updates, AllocationContext_t context) {
    return mutator_alloc_region(context)->attempt_allocation_locked(word_size, bot_updates);
  }

  HeapWord* par_allocate_during_mutator_force(size_t word_size, bool bot_updates, AllocationContext_t context) {
    return mutator_alloc_region(context)->attempt_allocation_force(word_size, bot_updates);
  }

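  // Returns an upper bound on the size of a TLAB that can currently be
  // allocated.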
  size_t unsafe_max_tlab_alloc();

  virtual bool is_retained_old_region(HeapRegion* hr) = 0;

  // Returns the amount of memory that is in use by the managed allocation regions.
  virtual size_t used_in_alloc_regions() const = 0;
};

// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
 private:
  // PLAB sizing policy for survivors.
  G1EvacStats _survivor_plab_stats;
  // PLAB sizing policy for tenured objects.
  G1EvacStats _old_plab_stats;

 protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

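  // The old GC allocation region retained at the end of the last collection;
  // it may be reused as the initial old GC allocation region of the next one.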
  HeapRegion* _retained_old_gc_alloc_region;

  G1EvacStats* evac_stats(InCSetState dest);

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

 public:
  G1DefaultAllocator(G1CollectedHeap* heap);

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual size_t used_in_alloc_regions() const {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = 0;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = _mutator_alloc_region.get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

// A PLAB used during garbage collection that is specific to G1.
class G1PLAB: public ParGCAllocBuffer {
 private:
  bool _retired;

 public:
  G1PLAB(size_t gclab_word_size);
  virtual ~G1PLAB() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  // The amount of space in words wasted within the PLAB including
  // waste due to refills and alignment.
  size_t wasted() const { return _wasted; }

  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  virtual void retire() {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire();
    _retired = true;
  }
};

// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class PLABAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
 protected:
  G1Allocator* _allocator;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case, since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

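  // Per-destination statistics: the number of words wasted by undone
  // allocations and the number of words allocated inline, i.e. directly into
  // a region rather than through a PLAB.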
  size_t _undo_waste[InCSetState::Num];
  size_t _inline_allocated[InCSetState::Num];

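  // Flushes the accumulated allocation statistics to the shared evacuation
  // statistics and retires all PLABs.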
  virtual void flush_stats_and_retire() = 0;
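  // Returns the PLAB used for allocations into dest in the given allocation context.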
  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }

 public:
  PLABAllocator(G1Allocator* heap_manager) :
    _allocator(heap_manager), _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
    for (size_t i = 0; i < ARRAY_SIZE(_inline_allocated); i++) {
      _inline_allocated[i] = 0;
    }
    for (size_t i = 0; i < ARRAY_SIZE(_undo_waste); i++) {
      _undo_waste[i] = 0;
    }
  }

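  // Factory method for the PLABAllocator implementation to use.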
  static PLABAllocator* create_allocator(G1Allocator* allocator);

  // Returns the number of words allocated inline for the given state so far.
  size_t inline_allocated(InCSetState value) const { return _inline_allocated[value.value()]; }
  // Returns the number of words wasted due to undone allocations for the given state so far.
  size_t lab_undo_waste(InCSetState value) const { return _undo_waste[value.value()]; }
  // Returns the number of words wasted due to alignment or LAB refills.
  virtual size_t lab_waste(InCSetState value) const = 0;

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
                                        size_t word_sz,
                                        AllocationContext_t context);

  // Allocate word_sz words in the PLAB of dest.  Returns the address of the
  // allocated memory, NULL if not successful.
  HeapWord* plab_allocate(InCSetState dest,
                          size_t word_sz,
                          AllocationContext_t context) {
    G1PLAB* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }

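  // Allocate word_sz words in dest: first try the current PLAB and, failing
  // that, allocate either directly into a region or into a new PLAB.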
  HeapWord* allocate(InCSetState dest, size_t word_sz,
                     AllocationContext_t context) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context);
  }

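  // Undo the allocation of word_sz words at obj in dest, returning the space
  // to the PLAB when possible and otherwise accounting it as undo waste.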
  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
};

// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class DefaultPLABAllocator : public PLABAllocator {
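  // The PLABs for survivor and old generation allocation, plus a table indexed
  // by destination state that maps to the PLAB to use for it.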
  G1PLAB  _surviving_alloc_buffer;
  G1PLAB  _tenured_alloc_buffer;
  G1PLAB* _alloc_buffers[InCSetState::Num];

 public:
  DefaultPLABAllocator(G1Allocator* allocator);

  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
    assert(_alloc_buffers[dest.value()] != NULL,
           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
    return _alloc_buffers[dest.value()];
  }

  virtual size_t lab_waste(InCSetState value) const;

  virtual void flush_stats_and_retire();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP