/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_G1_G1ALLOCATOR_HPP

#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.hpp"

class EvacuationInfo;

// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

public:
  G1Allocator(G1CollectedHeap* heap) :
    _g1h(heap), _summary_bytes_used(0) { }

  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
  virtual size_t                 used() = 0;
  virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;

  void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                                           OldGCAllocRegion* old,
                                                           HeapRegion** retained);

  size_t used_unlocked() const {
    return _summary_bytes_used;
  }

  void increase_used(size_t bytes) {
    _summary_bytes_used += bytes;
  }

  void decrease_used(size_t bytes) {
    assert(_summary_bytes_used >= bytes,
           err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
                   _summary_bytes_used, bytes));
    _summary_bytes_used -= bytes;
  }

  void set_used(size_t bytes) {
    _summary_bytes_used = bytes;
  }

  virtual HeapRegion* new_heap_region(uint hrs_index,
                                      G1BlockOffsetSharedArray* sharedOffsetArray,
                                      MemRegion mr) {
    return new HeapRegion(hrs_index, sharedOffsetArray, mr);
  }
};
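
// Illustrative sketch (not part of the original header; the function name and
// parameter below are hypothetical): the used-bytes accounting above is meant
// to be driven by the heap when it retires the current mutator allocation
// region, folding that region's bytes into the summary that used_unlocked()
// reports.
//
//   void retire_mutator_region_example(G1Allocator* allocator,
//                                      size_t allocated_words) {
//     allocator->increase_used(allocated_words * HeapWordSize);
//   }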

// The default allocator for G1.
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  virtual size_t used() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = _summary_bytes_used;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

class G1PLAB: public PLAB {
private:
  bool _retired;

public:
  G1PLAB(size_t gclab_word_size);
  virtual ~G1PLAB() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  virtual void set_buf(HeapWord* buf) {
    PLAB::set_buf(buf);
    _retired = false;
  }

  virtual void retire() {
    if (_retired) {
      return;
    }
    PLAB::retire();
    _retired = true;
  }

  virtual void flush_and_retire_stats(PLABStats* stats) {
    PLAB::flush_and_retire_stats(stats);
    _retired = true;
  }
};
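
// Illustrative sketch (not part of the original header; buf and word_sz are
// hypothetical): the intended G1PLAB lifecycle. A buffer must be retired, or
// have its stats flushed, before it is destroyed, otherwise the destructor's
// guarantee fires.
//
//   G1PLAB plab(gclab_word_size);
//   plab.set_buf(buf);                  // install a freshly allocated buffer
//   HeapWord* obj = plab.allocate(word_sz);
//   ...
//   plab.retire();                      // idempotent; marks the buffer retired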

class G1ParGCAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

  virtual void retire_alloc_buffers() = 0;
  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that
  // alignment, or 0 if there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }

public:
  G1ParGCAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()) { }
  virtual ~G1ParGCAllocator() { }

  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);

  virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
                                        size_t word_sz,
                                        AllocationContext_t context);
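
  // Illustrative sketch only (the real definition lives in g1Allocator.cpp
  // and may differ; the size test and refill steps below are assumptions):
  // small requests refill the PLAB, large ones go straight to a GC alloc
  // region.
  //
  //   HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
  //                                                           size_t word_sz,
  //                                                           AllocationContext_t context) {
  //     if (word_sz is small relative to the desired PLAB size) {
  //       alloc_buffer(dest, context)->retire();
  //       // ask the heap for a fresh PLAB-sized chunk, install it with
  //       // set_buf(), then allocate word_sz words from the new buffer
  //     } else {
  //       // allocate word_sz words directly in the GC alloc region for dest
  //     }
  //   }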

  // Allocate word_sz words in the PLAB of dest.  Returns the address of the
  // allocated memory, NULL if not successful.
  HeapWord* plab_allocate(InCSetState dest,
                          size_t word_sz,
                          AllocationContext_t context) {
    G1PLAB* buffer = alloc_buffer(dest, context);
    if (_survivor_alignment_bytes == 0) {
      return buffer->allocate(word_sz);
    } else {
      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
    }
  }

  HeapWord* allocate(InCSetState dest, size_t word_sz,
                     AllocationContext_t context) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context);
  }

  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
  }
};
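
// Illustrative usage sketch (an assumption, not a quote of the real caller;
// the variable names are hypothetical): during evacuation a worker claims
// space with allocate() and hands it back with undo_allocation() if it loses
// the race to copy the same object.
//
//   HeapWord* obj_ptr = alloc->allocate(dest, word_sz, context);
//   if (obj_ptr != NULL) {
//     // ... try to install the forwarding pointer with a CAS ...
//     if (another_worker_copied_it) {
//       alloc->undo_allocation(dest, obj_ptr, word_sz, context);
//     }
//   }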

class G1DefaultParGCAllocator : public G1ParGCAllocator {
  G1PLAB  _surviving_alloc_buffer;
  G1PLAB  _tenured_alloc_buffer;
  G1PLAB* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultParGCAllocator(G1CollectedHeap* g1h);

  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
    assert(_alloc_buffers[dest.value()] != NULL,
           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
    return _alloc_buffers[dest.value()];
  }

  virtual void retire_alloc_buffers();

  virtual void waste(size_t& wasted, size_t& undo_wasted);
};
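
// Illustrative sketch (an assumption about the constructor in
// g1Allocator.cpp): the two fixed buffers are expected to back the
// per-destination lookup table used by alloc_buffer(), e.g.
//
//   _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
//   _alloc_buffers[InCSetState::Old]   = &_tenured_alloc_buffer;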

class G1ArchiveAllocator : public CHeapObj<mtGC> {

protected:
  G1CollectedHeap* _g1h;

  // The current allocation region.
  HeapRegion* _allocation_region;

  // Regions allocated for the current archive range.
  GrowableArray<HeapRegion*> _allocated_regions;

  // The number of bytes used in the current range.
  size_t _summary_bytes_used;

  // Current allocation window within the current region.
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _max;

  // Allocate a new region for this archive allocator.
  // Allocation is from the top of the reserved heap downward.
  HeapRegion* alloc_new_region();

public:
  G1ArchiveAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h),
    _allocation_region(NULL),
    _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
                                                         ResourceObj::C_HEAP),
                        2), true /* C_Heap */),
    _summary_bytes_used(0) { }

  ~G1ArchiveAllocator() {
    assert(_allocation_region == NULL, "_allocation_region not NULL");
  }

  static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h);

  // Allocate memory for an individual object.
  HeapWord* archive_mem_allocate(size_t word_size);
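
  // Illustrative sketch only (the real definition lives in g1Allocator.cpp
  // and may differ; the failure handling and accounting shown are
  // assumptions): archive allocation bumps _top within the current
  // [_bottom, _max) window and calls alloc_new_region() when the request
  // does not fit.
  //
  //   HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  //     if (_allocation_region == NULL || _top + word_size > _max) {
  //       // alloc_new_region() is expected to reset _bottom, _top and _max
  //       if (alloc_new_region() == NULL) {
  //         return NULL;
  //       }
  //     }
  //     HeapWord* result = _top;
  //     _top += word_size;
  //     _summary_bytes_used += word_size * HeapWordSize;
  //     return result;
  //   }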

  // Return the memory ranges used in the current archive, after
  // aligning to the requested alignment.
  void complete_archive(GrowableArray<MemRegion>* ranges,
                        uint end_alignment);

  // The number of bytes allocated by this allocator.
  size_t used() {
    return _summary_bytes_used;
  }

  // Clear the count of bytes allocated in prior G1 regions. This must be
  // done when recalculate_used() is called to reset the counter for the
  // generic allocator, since that count covers all G1 regions, including
  // those still associated with this allocator.
  void clear_used() {
    _summary_bytes_used = 0;
  }

};

#endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP