1 /*
   2  * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
  27 
  28 #include "gc_implementation/g1/g1AllocationContext.hpp"
  29 #include "gc_implementation/g1/g1AllocRegion.hpp"
  30 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  31 
typedef int8_t in_cset_state_t;

// Helper class used to examine in_cset_state_t values.
// The state encodes, in a single signed byte, whether a region is in the
// collection set and, if so, what kind of region it is.
class InCSetState : AllStatic {
public:
  enum {
    // Values < 0 mean the region is a humongous region.
    NotInCSet    = 0,     // The region is not in the collection set.
    Young        = 1,     // The region is in the collection set and a young region.
    Old          = 2,     // The region is in the collection set and an old region.
    Num                   // Number of distinct non-humongous states; usable as an array bound.
  };

  // Marker value for humongous regions; any negative value denotes humongous.
  static in_cset_state_t humongous() { return -1; }

  static bool is_not_in_cset(in_cset_state_t state) { return state == NotInCSet; }
  static bool is_in_cset_or_humongous(in_cset_state_t state) { return state != NotInCSet; }
  static bool is_in_cset(in_cset_state_t state) { return state > NotInCSet; }
  static bool is_humongous(in_cset_state_t state) { return state < NotInCSet; }
};
  52 
// Base class for G1 allocators. Tracks the bytes used outside the current
// mutator allocation region and defines the interface through which the
// heap obtains its mutator and GC (survivor/old) allocation regions.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

public:
   G1Allocator(G1CollectedHeap* heap) :
     _g1h(heap), _summary_bytes_used(0) { }

   // Factory method: creates the concrete allocator used by the heap.
   static G1Allocator* create_allocator(G1CollectedHeap* g1h);

   // Set up / tear down the region used to satisfy mutator allocations.
   virtual void init_mutator_alloc_region() = 0;
   virtual void release_mutator_alloc_region() = 0;

   // Set up / tear down / abandon the regions used to satisfy GC
   // allocations (survivor and old) during an evacuation pause.
   virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
   virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
   virtual void abandon_gc_alloc_regions() = 0;

   // Current allocation regions for the given allocation context.
   virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
   virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
   virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
   virtual size_t                 used() = 0;
   virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;

   // Reinstall a retained old GC alloc region (*retained, if any) as the
   // current old GC alloc region, recording it in evacuation_info.
   void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                                            OldGCAllocRegion* old,
                                                            HeapRegion** retained);

   // Bytes used excluding the current mutator alloc region; does not
   // require the Heap_lock (unlike the virtual used() above).
   size_t used_unlocked() const {
     return _summary_bytes_used;
   }

   void increase_used(size_t bytes) {
     _summary_bytes_used += bytes;
   }

   void decrease_used(size_t bytes) {
     assert(_summary_bytes_used >= bytes,
            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
                _summary_bytes_used, bytes));
     _summary_bytes_used -= bytes;
   }

   void set_used(size_t bytes) {
     _summary_bytes_used = bytes;
   }

   // Hook allowing subclasses to create specialized HeapRegion instances.
   virtual HeapRegion* new_heap_region(uint hrs_index,
                                       G1BlockOffsetSharedArray* sharedOffsetArray,
                                       MemRegion mr) {
     return new HeapRegion(hrs_index, sharedOffsetArray, mr);
   }
};
 111 
// The default allocator for G1. Keeps exactly one mutator, one survivor
// and one old allocation region, ignoring the allocation context.
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  // Old GC alloc region kept across pauses so it can be reused;
  // NULL when no region is currently retained.
  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  // The context parameter is unused: this allocator is context-agnostic.
  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  // Total used bytes including the current mutator alloc region.
  // Caller must hold the Heap_lock.
  virtual size_t used() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = _summary_bytes_used;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};
 166 
 167 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
 168 private:
 169   bool _retired;
 170 
 171 public:
 172   G1ParGCAllocBuffer(size_t gclab_word_size);
 173   virtual ~G1ParGCAllocBuffer() {
 174     guarantee(_retired, "Allocation buffer has not been retired");
 175   }
 176 
 177   virtual void set_buf(HeapWord* buf) {
 178     ParGCAllocBuffer::set_buf(buf);
 179     _retired = false;
 180   }
 181 
 182   virtual void retire(bool end_of_gc, bool retain) {
 183     if (_retired) {
 184       return;
 185     }
 186     ParGCAllocBuffer::retire(end_of_gc, retain);
 187     _retired = true;
 188   }
 189 };
 190 
 191 class G1ParGCAllocator : public CHeapObj<mtGC> {
 192   friend class G1ParScanThreadState;
 193 protected:
 194   G1CollectedHeap* _g1h;
 195 
 196   // The survivor alignment in effect in bytes.
 197   // == 0 : don't align survivors
 198   // != 0 : align survivors to that alignment
 199   // These values were chosen to favor the non-alignment case since some
 200   // architectures have a special compare against zero instructions.
 201   const uint _survivor_alignment_bytes;
 202 
 203   size_t _alloc_buffer_waste;
 204   size_t _undo_waste;
 205 
 206   void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
 207   void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 208 
 209   virtual void retire_alloc_buffers() = 0;
 210   virtual G1ParGCAllocBuffer* alloc_buffer(in_cset_state_t dest, AllocationContext_t context) = 0;
 211 
 212   // Calculate the survivor space object alignment in bytes. Returns that or 0 if
 213   // there are no restrictions on survivor alignment.
 214   static uint calc_survivor_alignment_bytes() {
 215     assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
 216     if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
 217       // No need to align objects in the survivors differently, return 0
 218       // which means "survivor alignment is not used".
 219       return 0;
 220     } else {
 221       assert(SurvivorAlignmentInBytes > 0, "sanity");
 222       return SurvivorAlignmentInBytes;
 223     }
 224   }
 225 
 226 public:
 227   G1ParGCAllocator(G1CollectedHeap* g1h) :
 228     _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
 229     _alloc_buffer_waste(0), _undo_waste(0) {
 230   }
 231 
 232   static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
 233 
 234   size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
 235   size_t undo_waste() {return _undo_waste; }
 236 
 237   // Allocate word_sz words in dest, either directly into the regions or by
 238   // allocating a new PLAB. Returns the address of the allocated memory, NULL if
 239   // not successful.
 240   HeapWord* allocate_direct_or_new_plab(in_cset_state_t dest,
 241                                         size_t word_sz,
 242                                         AllocationContext_t context);
 243 
 244   // Allocate word_sz words in the PLAB of dest.  Returns the address of the
 245   // allocated memory, NULL if not successful.
 246   HeapWord* plab_allocate(in_cset_state_t dest,
 247                           size_t word_sz,
 248                           AllocationContext_t context) {
 249     G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
 250     if (_survivor_alignment_bytes == 0) {
 251       return buffer->allocate(word_sz);
 252     } else {
 253       return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
 254     }
 255   }
 256 
 257   HeapWord* allocate(in_cset_state_t dest, size_t word_sz,
 258                      AllocationContext_t context) {
 259     HeapWord* const obj = plab_allocate(dest, word_sz, context);
 260     if (obj != NULL) {
 261       return obj;
 262     }
 263     return allocate_direct_or_new_plab(dest, word_sz, context);
 264   }
 265 
 266   void undo_allocation(in_cset_state_t dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
 267     if (alloc_buffer(dest, context)->contains(obj)) {
 268       assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
 269              "should contain whole object");
 270       alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
 271     } else {
 272       CollectedHeap::fill_with_object(obj, word_sz);
 273       add_to_undo_waste(word_sz);
 274     }
 275   }
 276 };
 277 
 278 class G1DefaultParGCAllocator : public G1ParGCAllocator {
 279   G1ParGCAllocBuffer  _surviving_alloc_buffer;
 280   G1ParGCAllocBuffer  _tenured_alloc_buffer;
 281   G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
 282 
 283 public:
 284   G1DefaultParGCAllocator(G1CollectedHeap* g1h);
 285 
 286   virtual G1ParGCAllocBuffer* alloc_buffer(in_cset_state_t dest, AllocationContext_t context) {
 287     assert(dest < InCSetState::Num,
 288            err_msg("Allocation buffer index out-of-bounds: %d", dest));
 289     assert(_alloc_buffers[dest] != NULL,
 290            err_msg("Allocation buffer is NULL: %d", dest));
 291     return _alloc_buffers[dest];
 292   }
 293 
 294   virtual void retire_alloc_buffers() ;
 295 };
 296 
 297 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP