/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_G1_G1ALLOCATOR_HPP

#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.hpp"

class EvacuationInfo;

// Interface to keep track of which regions G1 is currently allocating into. Provides
// accessors to these regions (e.g. for allocating into them, or getting their occupancy).
// Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;

  // Accessors to the allocation regions.
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size,
                                               AllocationContext_t context);
  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size,
                                          AllocationContext_t context);
public:
  G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
  virtual ~G1Allocator() { }

  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

#ifdef ASSERT
  // Do we currently have an active mutator region to allocate into?
  bool has_mutator_alloc_region(AllocationContext_t context) { return mutator_alloc_region(context)->get() != NULL; }
#endif
  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  // Management of retained regions.

  virtual bool is_retained_old_region(HeapRegion* hr) = 0;
  void reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                 OldGCAllocRegion* old,
                                 HeapRegion** retained);

  // Allocate blocks of memory during mutator time.

  inline HeapWord* attempt_allocation(size_t word_size, AllocationContext_t context);
  inline HeapWord* attempt_allocation_locked(size_t word_size, AllocationContext_t context);
  inline HeapWord* attempt_allocation_force(size_t word_size, AllocationContext_t context);

  size_t unsafe_max_tlab_alloc(AllocationContext_t context);
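
  // Illustrative caller sketch (hypothetical; the real call sites live in
  // G1CollectedHeap::attempt_allocation() and its slow path): a lock-free
  // inline attempt is tried first, and the Heap_lock is taken only on failure.
  //
  //   HeapWord* result = _allocator->attempt_allocation(word_size, context);
  //   if (result == NULL) {
  //     MutexLockerEx ml(Heap_lock);
  //     result = _allocator->attempt_allocation_locked(word_size, context);
  //   }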

  // Allocate blocks of memory during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(InCSetState dest,
                                   size_t word_size,
                                   AllocationContext_t context);
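
  // A minimal sketch of the dispatch performed above (the authoritative
  // version lives in g1Allocator.cpp): the destination state selects the
  // survivor or old allocation path.
  //
  //   switch (dest.value()) {
  //     case InCSetState::Young: return survivor_attempt_allocation(word_size, context);
  //     case InCSetState::Old:   return old_attempt_allocation(word_size, context);
  //     default:                 ShouldNotReachHere();
  //   }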

  virtual size_t used_in_alloc_regions() = 0;
};

// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the (single) old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  virtual size_t used_in_alloc_regions() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = 0;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

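// A G1-specific PLAB that tracks whether it has been retired, making retire()
// idempotent and letting the destructor verify that no buffer is discarded
// without being retired (and thus without flushing its statistics).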
class G1PLAB: public PLAB {
private:
  bool _retired;

public:
  G1PLAB(size_t gclab_word_size);
  virtual ~G1PLAB() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  virtual void set_buf(HeapWord* buf) {
    PLAB::set_buf(buf);
    _retired = false;
  }

  virtual void retire() {
    if (_retired) {
      return;
    }
    PLAB::retire();
    _retired = true;
  }

  virtual void flush_and_retire_stats(PLABStats* stats) {
    PLAB::flush_and_retire_stats(stats);
    _retired = true;
  }
};

// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;
  G1Allocator* _allocator;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

  virtual void retire_alloc_buffers() = 0;
  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }
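
  // Worked example: with -XX:SurvivorAlignmentInBytes=32 and the default
  // 8-byte ObjectAlignmentInBytes this returns 32, so plab_allocate() below
  // pads survivor allocations to 32-byte boundaries; when the two flags are
  // equal it returns 0 and the cheaper unaligned path is taken.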

public:
  G1PLABAllocator(G1Allocator* allocator);
  virtual ~G1PLABAllocator() { }

  static G1PLABAllocator* create_allocator(G1Allocator* allocator);

  virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
                                        size_t word_sz,
                                        AllocationContext_t context);
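
  // A rough sketch of the policy behind allocate_direct_or_new_plab() (the
  // authoritative version lives in g1Allocator.cpp): requests small relative
  // to the PLAB size retire the current buffer and refill a fresh PLAB, while
  // larger requests go straight to the shared allocation region, gated by
  // ParallelGCBufferWastePct.
  //
  //   if (word_sz * 100 < plab_word_size * ParallelGCBufferWastePct) {
  //     // retire the current PLAB, allocate a new one, allocate from it
  //   } else {
  //     return _allocator->par_allocate_during_gc(dest, word_sz, context);
  //   }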

  // Allocate word_sz words in the PLAB of dest.  Returns the address of the
  // allocated memory, NULL if not successful.
  HeapWord* plab_allocate(InCSetState dest,
                          size_t word_sz,
                          AllocationContext_t context) {
    G1PLAB* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }

  HeapWord* allocate(InCSetState dest, size_t word_sz,
                     AllocationContext_t context) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context);
  }

  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
};

// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
  G1PLAB  _surviving_alloc_buffer;
  G1PLAB  _tenured_alloc_buffer;
  G1PLAB* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultPLABAllocator(G1Allocator* _allocator);

  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
    assert(_alloc_buffers[dest.value()] != NULL,
           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
    return _alloc_buffers[dest.value()];
  }

  virtual void retire_alloc_buffers();

  virtual void waste(size_t& wasted, size_t& undo_wasted);
};

// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not modifiable by GC, being neither
// scavenged nor compacted, nor even marked in the object header.
// They can contain no pointers to non-archive heap regions.
class G1ArchiveAllocator : public CHeapObj<mtGC> {

protected:
  G1CollectedHeap* _g1h;

  // The current allocation region
  HeapRegion* _allocation_region;

  // Regions allocated for the current archive range.
  GrowableArray<HeapRegion*> _allocated_regions;

  // The number of bytes used in the current range.
  size_t _summary_bytes_used;

  // Current allocation window within the current region.
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _max;

  // Allocate a new region for this archive allocator.
  // Allocation is from the top of the reserved heap downward.
  bool alloc_new_region();

public:
  G1ArchiveAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h),
    _allocation_region(NULL),
    _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
                                                         ResourceObj::C_HEAP),
                        2), true /* C_Heap */),
    _summary_bytes_used(0),
    _bottom(NULL),
    _top(NULL),
    _max(NULL) { }

  virtual ~G1ArchiveAllocator() {
    assert(_allocation_region == NULL, "_allocation_region not NULL");
  }

  static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h);

  // Allocate memory for an individual object.
  HeapWord* archive_mem_allocate(size_t word_size);
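
  // A minimal sketch of the bump-pointer scheme behind archive_mem_allocate()
  // (see g1Allocator.cpp for the authoritative version): objects are laid out
  // contiguously in the window [_bottom, _max), advancing _top; when the
  // window cannot hold the request, alloc_new_region() opens a new one.
  //
  //   if (_top + word_size > _max) {
  //     if (!alloc_new_region()) return NULL;  // no more archive space
  //   }
  //   HeapWord* old_top = _top;
  //   _top = old_top + word_size;
  //   return old_top;
  //
  // The real implementation also pads skipped space with filler objects
  // before moving on.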

  // Return the memory ranges used in the current archive, after
  // aligning to the requested alignment.
  void complete_archive(GrowableArray<MemRegion>* ranges,
                        size_t end_alignment_in_bytes);

  // The number of bytes allocated by this allocator.
  size_t used() {
    return _summary_bytes_used;
  }

  // Clear the count of bytes allocated in prior G1 regions. This
  // must be done when recalculate_used() is used to reset the counter
  // for the generic allocator, since it counts bytes in all G1
  // regions, including those still associated with this allocator.
  void clear_used() {
    _summary_bytes_used = 0;
  }

};

#endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP