/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_G1_G1ALLOCATOR_HPP

#include "gc/g1/g1AllocRegion.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.hpp"

class EvacuationInfo;

// Interface to keep track of which regions G1 is currently allocating into.
// Provides methods for allocating into these regions and for querying their
// occupancy. Also keeps track of retained regions across GCs.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

  static inline bool is_humongous(size_t word_size);

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;

  virtual bool survivor_is_full(AllocationContext_t context) const = 0;
  virtual bool old_is_full(AllocationContext_t context) const = 0;

  virtual void set_survivor_full(AllocationContext_t context) = 0;
  virtual void set_old_full(AllocationContext_t context) = 0;

  // Accessors to the allocation regions.
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_word_size,
                                               AllocationContext_t context);
  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t min_word_size,
                                          size_t desired_word_size,
                                          size_t* actual_word_size,
                                          AllocationContext_t context);

  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size,
                                    AllocationContext_t context,
                                    uint* gc_count_before_ret,
                                    uint* gclocker_retry_count_ret);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size,
                                         uint* gc_count_before_ret,
                                         uint* gclocker_retry_count_ret);

  // Initialize a contiguous set of free regions of length num_regions
  // and starting at index first so that they appear as a single
  // humongous region.
  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                      uint num_regions,
                                                      size_t word_size,
                                                      AllocationContext_t context);

  // Attempt to allocate a humongous object of the given size. Return
  // NULL if unsuccessful.
  HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);

  // Returns the number of regions the humongous object of the given word size
  // requires.
  static size_t humongous_obj_size_in_regions(size_t word_size);
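
  // Worked sizing example (illustrative only, assuming a 1MB region size,
  // i.e. HeapRegion::GrainWords == 128K words on a 64-bit VM): an
  // allocation counts as humongous once it is larger than half a region
  // (> 64K words), and a 300K-word object then needs
  // humongous_obj_size_in_regions(300 * K) == ceil(300K / 128K) == 3 regions.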

  // The methods attempt_allocation_slow() and attempt_allocation_humongous()
  // above, together with attempt_allocation() below, take a
  // gc_count_before_ret parameter which is used to return the GC count
  // if the method returns NULL. Given that we are required to read the
  // GC count while holding the Heap_lock, and these paths will take the
  // Heap_lock at some point, it's easier to get them to read the GC
  // count while holding the Heap_lock before they return NULL instead
  // of the caller (namely: mem_allocate()) having to also take the
  // Heap_lock just to read the GC count.

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t word_size,
                                      uint* gc_count_before_ret,
                                      uint* gclocker_retry_count_ret);
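
  // A hedged sketch of how the two mutator allocation levels compose (the
  // real driver lives in G1CollectedHeap; local names here are illustrative):
  //
  //   HeapWord* result = attempt_allocation(word_size, &gc_count, &gclocker_retries);
  //   if (result == NULL) {
  //     // The lock-free fast path failed: fall back to the Heap_lock path,
  //     // which may schedule an evacuation pause before retrying.
  //     result = attempt_allocation_slow(word_size, context, &gc_count, &gclocker_retries);
  //   }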

public:
  G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
  virtual ~G1Allocator() { }

  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

#ifdef ASSERT
  // Do we currently have an active mutator region to allocate into?
  bool has_mutator_alloc_region(AllocationContext_t context) { return mutator_alloc_region(context)->get() != NULL; }
#endif

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            AllocationContext_t context,
                                            bool expect_null_mutator_alloc_region);

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions that will be allocated by
  // this method will be found dead by the marking cycle).
  void allocate_dummy_regions(size_t word_size) PRODUCT_RETURN;

  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
  // into G1's allocation routines. They have the following
  // assumptions:
  //
  // * They should both be called outside safepoints.
  //
  // * They should both be called without holding the Heap_lock.
  //
  // * All allocation requests for new TLABs should go to
  //   allocate_new_tlab().
  //
  // * All non-TLAB allocation requests should go to mem_allocate().
  //
  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, they will attempt to do an evacuation pause and
  //   retry the allocation.
  //
  // * If all allocation attempts fail, even after trying to schedule
  //   an evacuation pause, allocate_new_tlab() will return NULL,
  //   whereas mem_allocate() will attempt a heap expansion and/or
  //   schedule a Full GC.
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab()
  //   should never be called with a humongous word_size. All
  //   humongous allocation requests should go to mem_allocate(), which
  //   will satisfy them with a special path.

  HeapWord* allocate_new_tlab(size_t word_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool*  gc_overhead_limit_was_exceeded);
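
  // A minimal call-site sketch (illustrative only; the actual clients are
  // the runtime's TLAB machinery and the shared CollectedHeap allocation
  // path):
  //
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = allocator->mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
  //   if (obj == NULL) {
  //     // Even heap expansion and a Full GC could not satisfy the request.
  //   }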

  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  // Management of retained regions.

  virtual bool is_retained_old_region(HeapRegion* hr) = 0;
  void reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                 OldGCAllocRegion* old,
                                 HeapRegion** retained);

  // Allocate blocks of memory during mutator time.

  inline HeapWord* attempt_allocation(size_t word_size, AllocationContext_t context);
  inline HeapWord* attempt_allocation_locked(size_t word_size, AllocationContext_t context);
  inline HeapWord* attempt_allocation_force(size_t word_size, AllocationContext_t context);

  size_t unsafe_max_tlab_alloc(AllocationContext_t context);

  // Allocate blocks of memory during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // must not be humongous: it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(InCSetState dest,
                                   size_t word_size,
                                   AllocationContext_t context);

  HeapWord* par_allocate_during_gc(InCSetState dest,
                                   size_t min_word_size,
                                   size_t desired_word_size,
                                   size_t* actual_word_size,
                                   AllocationContext_t context);
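
  // Hedged sketch of the sized variant above: the caller asks for
  // desired_word_size but accepts anything down to min_word_size, and
  // actual_word_size reports what was actually handed out (local names
  // here are illustrative):
  //
  //   size_t actual = 0;
  //   HeapWord* buf = par_allocate_during_gc(dest, min_sz, desired_sz, &actual, context);
  //   if (buf != NULL) {
  //     plab->set_buf(buf, actual);  // actual is within [min_sz, desired_sz]
  //   }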

  virtual size_t used_in_alloc_regions() = 0;
};

// The default allocation region manager for G1. Provides a single mutator, survivor
// and old generation allocation region.
// Can retain the (single) old generation allocation region across GCs.
class G1DefaultAllocator : public G1Allocator {
private:
  bool _survivor_is_full;
  bool _old_is_full;
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap);

  virtual bool survivor_is_full(AllocationContext_t context) const;
  virtual bool old_is_full(AllocationContext_t context) const;

  virtual void set_survivor_full(AllocationContext_t context);
  virtual void set_old_full(AllocationContext_t context);

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  virtual size_t used_in_alloc_regions() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = 0;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

class G1PLAB: public PLAB {
private:
  bool _retired;

public:
  G1PLAB(size_t gclab_word_size);
  virtual ~G1PLAB() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  // The amount of space in words wasted within the PLAB including
  // waste due to refills and alignment.
  size_t wasted() const { return _wasted; }

  virtual void set_buf(HeapWord* buf, size_t word_size) {
    PLAB::set_buf(buf, word_size);
    _retired = false;
  }

  virtual void retire() {
    if (_retired) {
      return;
    }
    PLAB::retire();
    _retired = true;
  }

  virtual void flush_and_retire_stats(PLABStats* stats) {
    PLAB::flush_and_retire_stats(stats);
    _retired = true;
  }
};
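
// Illustrative G1PLAB lifecycle (a sketch, not prescribed usage; the real
// driver is G1PLABAllocator below; allocate() is inherited from PLAB):
//
//   G1PLAB plab(buffer_word_size);
//   plab.set_buf(buf, actual_word_size);     // arm the buffer, _retired = false
//   HeapWord* obj = plab.allocate(word_sz);  // bump-pointer allocation from the buffer
//   ...
//   plab.retire();  // must happen before destruction, see the guarantee above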

// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
// Needs to handle multiple contexts, extra alignment in any "survivor" area and some
// statistics.
class G1PLABAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;
  G1Allocator* _allocator;

  // The survivor alignment in effect in bytes.
  // == 0 : don't align survivors
  // != 0 : align survivors to that alignment
  // These values were chosen to favor the non-alignment case since some
  // architectures have special compare-against-zero instructions.
  const uint _survivor_alignment_bytes;

  // Number of words allocated directly (not counting PLAB allocation).
  size_t _direct_allocated[InCSetState::Num];

  virtual void flush_and_retire_stats() = 0;
  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;

  // Calculate the survivor space object alignment in bytes. Returns that or 0 if
  // there are no restrictions on survivor alignment.
  static uint calc_survivor_alignment_bytes() {
    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
      // No need to align objects in the survivors differently, return 0
      // which means "survivor alignment is not used".
      return 0;
    } else {
      assert(SurvivorAlignmentInBytes > 0, "sanity");
      return SurvivorAlignmentInBytes;
    }
  }
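
  // Hedged sketch of how the alignment is consumed during PLAB allocation
  // (the real use is in plab_allocate(); allocate_aligned() is inherited
  // from PLAB):
  //
  //   if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
  //     return buffer->allocate(word_sz);
  //   } else {
  //     return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
  //   }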

  HeapWord* allocate_new_plab(InCSetState dest,
                              size_t word_sz,
                              AllocationContext_t context);

  bool may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const;
public:
  G1PLABAllocator(G1Allocator* allocator);
  virtual ~G1PLABAllocator() { }

  static G1PLABAllocator* create_allocator(G1Allocator* allocator);

  virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;

  // Allocate word_sz words in dest, either directly into the regions or by
  // allocating a new PLAB. Returns the address of the allocated memory, NULL if
  // not successful. plab_refill_failed indicates whether an attempt to refill the
  // PLAB failed or not.
  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
                                        size_t word_sz,
                                        AllocationContext_t context,
                                        bool* plab_refill_failed);

  // Allocate word_sz words in the PLAB of dest. Returns the address of the
  // allocated memory, NULL if not successful.
  inline HeapWord* plab_allocate(InCSetState dest,
                                 size_t word_sz,
                                 AllocationContext_t context);

  HeapWord* allocate(InCSetState dest,
                     size_t word_sz,
                     AllocationContext_t context,
                     bool* refill_failed) {
    HeapWord* const obj = plab_allocate(dest, word_sz, context);
    if (obj != NULL) {
      return obj;
    }
    return allocate_direct_or_new_plab(dest, word_sz, context, refill_failed);
  }
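
  // Illustrative evacuation-copy call site (a sketch; in this codebase the
  // real caller is G1ParScanThreadState):
  //
  //   bool refill_failed = false;
  //   HeapWord* dst = plab_allocator->allocate(dest, word_sz, context, &refill_failed);
  //   if (dst == NULL) {
  //     // Neither the PLAB, a PLAB refill, nor a direct allocation
  //     // succeeded; handle evacuation failure for this object.
  //   }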

  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
};

// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
// and old generation allocation.
class G1DefaultPLABAllocator : public G1PLABAllocator {
  G1PLAB  _surviving_alloc_buffer;
  G1PLAB  _tenured_alloc_buffer;
  G1PLAB* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultPLABAllocator(G1Allocator* _allocator);

  virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           "Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value());
    assert(_alloc_buffers[dest.value()] != NULL,
           "Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value());
    return _alloc_buffers[dest.value()];
  }

  virtual void flush_and_retire_stats();

  virtual void waste(size_t& wasted, size_t& undo_wasted);
};

// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not modifiable by GC, being neither
// scavenged nor compacted, nor even marked in the object header.
// They can contain no pointers to non-archive heap regions.
class G1ArchiveAllocator : public CHeapObj<mtGC> {

protected:
  G1CollectedHeap* _g1h;

  // The current allocation region
  HeapRegion* _allocation_region;

  // Regions allocated for the current archive range.
  GrowableArray<HeapRegion*> _allocated_regions;

  // The number of bytes used in the current range.
  size_t _summary_bytes_used;

  // Current allocation window within the current region.
  HeapWord* _bottom;
  HeapWord* _top;
  HeapWord* _max;

  // Allocate a new region for this archive allocator.
  // Allocation is from the top of the reserved heap downward.
  bool alloc_new_region();

public:
  G1ArchiveAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h),
    _allocation_region(NULL),
    _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
                                                         ResourceObj::C_HEAP),
                        2), true /* C_Heap */),
    _summary_bytes_used(0),
    _bottom(NULL),
    _top(NULL),
    _max(NULL) { }

  virtual ~G1ArchiveAllocator() {
    assert(_allocation_region == NULL, "_allocation_region not NULL");
  }

  static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h);

  // Allocate memory for an individual object.
  HeapWord* archive_mem_allocate(size_t word_size);

  // Return the memory ranges used in the current archive, after
  // aligning to the requested alignment.
  void complete_archive(GrowableArray<MemRegion>* ranges,
                        size_t end_alignment_in_bytes);
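
  // Illustrative allocation cycle (a sketch; archive creation, e.g. for a
  // shared string area, is the intended client):
  //
  //   G1ArchiveAllocator* arch = G1ArchiveAllocator::create_allocator(g1h);
  //   HeapWord* p = arch->archive_mem_allocate(word_size);
  //   ... further archive_mem_allocate() calls ...
  //   GrowableArray<MemRegion> ranges(2);
  //   arch->complete_archive(&ranges, end_alignment_in_bytes);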

  // The number of bytes allocated by this allocator.
  size_t used() {
    return _summary_bytes_used;
  }

  // Clear the count of bytes allocated in prior G1 regions. This
  // must be done when G1CollectedHeap::recalculate_used() is used to reset
  // the counter for the generic allocator, since the latter counts bytes
  // in all G1 regions, including those still associated with this allocator.
  void clear_used() {
    _summary_bytes_used = 0;
  }

};

#endif // SHARE_VM_GC_G1_G1ALLOCATOR_HPP