/*
 * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1ALLOCREGION_HPP
#define SHARE_VM_GC_G1_G1ALLOCREGION_HPP

#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/heapRegion.hpp"

class G1CollectedHeap;

// 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing
#define G1_ALLOC_REGION_TRACING 0

class ar_ext_msg;

// A class that holds a region that is active in satisfying allocation
// requests, potentially issued in parallel. When the active region is
// full it will be retired and replaced with a new one. The
// implementation assumes that fast-path allocations will be lock-free
// and a lock will need to be taken when the active region needs to be
// replaced.

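// A rough usage sketch (illustrative only; the exact lock and bot_updates
// value depend on the caller, e.g. the mutator path uses the Heap_lock and
// no BOT updates): try the lock-free fast path first, and only fall back to
// the locked slow path when it fails.
//
//   HeapWord* result = alloc_region->attempt_allocation(word_size,
//                                                       false /* bot_updates */);
//   if (result == NULL) {
//     MutexLockerEx ml(Heap_lock);  // caller-specific lock
//     result = alloc_region->attempt_allocation_locked(word_size,
//                                                      false /* bot_updates */);
//     // attempt_allocation_force() is a last resort once other attempts
//     // have failed and no active region is held.
//   }
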
class G1AllocRegion VALUE_OBJ_CLASS_SPEC {
  friend class ar_ext_msg;

private:
  // The active allocating region we are currently allocating out
  // of. The invariant is that if this object is initialized (i.e.,
  // init() has been called and release() has not) then _alloc_region
  // is either an active allocating region or the dummy region (i.e.,
  // it can never be NULL) and this object can be used to satisfy
  // allocation requests. If this object is not initialized
  // (i.e. init() has not been called or release() has been called)
  // then _alloc_region is NULL and this object should not be used to
  // satisfy allocation requests (it was done this way to force the
  // correct use of init() and release()).
  HeapRegion* volatile _alloc_region;

  // Allocation context associated with this alloc region.
  AllocationContext_t _allocation_context;

  // Keeps track of the number of distinct regions that are used for
  // allocation in the active interval of this object, i.e., between a
  // call to init() and a call to release(). The count includes regions
  // that are freshly allocated, as well as the region that is re-used
  // via the set() method. This count can be used in any heuristics that
  // might want to bound how many distinct regions this object can use
  // during an active interval.
  uint _count;

  // When we set up a new active region we save its used bytes in this
  // field so that, when we retire it, we can calculate how much space
  // we allocated in it.
  size_t _used_bytes_before;

  // When true, indicates that allocate calls should do BOT updates.
  const bool _bot_updates;

  // Useful for debugging and tracing.
  const char* _name;

  // A dummy region (i.e., it's been allocated specially for this
  // purpose and it is not part of the heap) that is full (i.e., top()
  // == end()). When we don't have a valid active region we make
  // _alloc_region point to this. This allows us to skip checking
  // whether the _alloc_region is NULL or not.
  static HeapRegion* _dummy_region;

  // Some of the methods below take a bot_updates parameter. Its value
  // should be the same as the _bot_updates field. The idea is that
  // the parameter will be a constant for a particular alloc region
  // and, given that these methods will hopefully be inlined, the
  // compiler should compile out the test.
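  //
  // As a purely illustrative sketch (the real dispatch lives in the
  // corresponding .inline.hpp file), the allocate() helpers are expected
  // to boil down to something like the following, where the constant
  // bot_updates argument lets the compiler drop the branch after inlining:
  //
  //   if (!bot_updates) {
  //     return alloc_region->allocate_no_bot_updates(word_size);
  //   } else {
  //     return alloc_region->allocate(word_size);
  //   }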

  // Perform a non-MT-safe allocation out of the given region.
  static inline HeapWord* allocate(HeapRegion* alloc_region,
                                   size_t word_size,
                                   bool bot_updates);

  // Perform an MT-safe allocation out of the given region.
  static inline HeapWord* par_allocate(HeapRegion* alloc_region,
                                       size_t word_size,
                                       bool bot_updates);
  // Perform an MT-safe allocation out of the given region, with the given
  // minimum and desired size. Returns the actual size allocated (between
  // minimum and desired size) in actual_word_size if the allocation has been
  // successful.
  static inline HeapWord* par_allocate(HeapRegion* alloc_region,
                                       size_t min_word_size,
                                       size_t desired_word_size,
                                       size_t* actual_word_size,
                                       bool bot_updates);

  // Ensure that the region passed as a parameter has been filled up
  // so that no one else can allocate out of it any more.
  // Returns the number of bytes that have been wasted by filling up
  // the space.
  static size_t fill_up_remaining_space(HeapRegion* alloc_region,
                                        bool bot_updates);

  // After a region is allocated by allocate_new_region(), this
  // method is used to set it as the active alloc_region.
  void update_alloc_region(HeapRegion* alloc_region);

  // Allocate a new active region and use it to perform a word_size
  // allocation. The force parameter will be passed on to
  // G1CollectedHeap::allocate_new_alloc_region() and tells it to try
  // to allocate a new region even if the max has been reached.
  HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);

  void fill_in_ext_msg(ar_ext_msg* msg, const char* message);

protected:
  // Retire the active allocating region. If fill_up is true then make
  // sure that the region is full before we retire it so that no one
  // else can allocate out of it.
  // Returns the number of bytes that have been filled up during retire.
  virtual size_t retire(bool fill_up);

  // For convenience as subclasses use it.
  static G1CollectedHeap* _g1h;

  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0;
  virtual void retire_region(HeapRegion* alloc_region,
                             size_t allocated_bytes) = 0;

  G1AllocRegion(const char* name, bool bot_updates);

public:
  static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);

  HeapRegion* get() const {
    HeapRegion* hr = _alloc_region;
    // Make sure that the dummy region does not escape this class.
    return (hr == _dummy_region) ? NULL : hr;
  }

  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
  AllocationContext_t allocation_context() { return _allocation_context; }

  uint count() { return _count; }

  // The following two are the building blocks for the allocation method.

  // First-level allocation: Should be called without holding a
  // lock. It will try to allocate lock-free out of the active region,
  // or return NULL if it was unable to.
  inline HeapWord* attempt_allocation(size_t word_size,
                                      bool bot_updates);
  // Perform an allocation out of the current allocation region, with the given
  // minimum and desired size. Returns the actual size allocated (between
  // minimum and desired size) in actual_word_size if the allocation has been
  // successful.
  // Should be called without holding a lock. It will try to allocate lock-free
  // out of the active region, or return NULL if it was unable to.
  inline HeapWord* attempt_allocation(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_word_size,
                                      bool bot_updates);

  // Second-level allocation: Should be called while holding a
  // lock. It will first try to allocate lock-free out of the active
  // region or, if it's unable to, it will try to replace the active
  // alloc region with a new one. We require that the caller takes the
  // appropriate lock before calling this so that it is easier to make
  // it conform to its locking protocol.
  inline HeapWord* attempt_allocation_locked(size_t word_size,
                                             bool bot_updates);
  // Same as attempt_allocation_locked(size_t, bool), but allows specifying the
  // minimum word size of the block in min_word_size, and the desired (maximum)
  // word size of the allocation in desired_word_size. The actual size of the
  // block is returned in actual_word_size.
  inline HeapWord* attempt_allocation_locked(size_t min_word_size,
                                             size_t desired_word_size,
                                             size_t* actual_word_size,
                                             bool bot_updates);

  // Should be called to allocate a new region even if the maximum number
  // of regions of this type has been reached. Should only be called if
  // other allocation attempts have failed and we are not holding a valid
  // active region.
  inline HeapWord* attempt_allocation_force(size_t word_size,
                                            bool bot_updates);

  // Should be called before we start using this object.
  void init();

  // This can be used to set the active region to a specific
  // region. (Use example: we try to retain the last old GC alloc
  // region that we've used during a GC and we can use set() to
  // re-instate it at the beginning of the next GC.)
  void set(HeapRegion* alloc_region);

  // Should be called when we want to release the active region, which
  // is returned after it has been retired.
  virtual HeapRegion* release();

#if G1_ALLOC_REGION_TRACING
  void trace(const char* str,
             size_t min_word_size = 0,
             size_t desired_word_size = 0,
             size_t actual_word_size = 0,
             HeapWord* result = NULL);
#else // G1_ALLOC_REGION_TRACING
  void trace(const char* str,
             size_t min_word_size = 0,
             size_t desired_word_size = 0,
             size_t actual_word_size = 0,
             HeapWord* result = NULL) { }
#endif // G1_ALLOC_REGION_TRACING
};

class MutatorAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  MutatorAllocRegion()
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
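
// For illustration only: subclasses implement the two pure virtuals by
// forwarding to G1CollectedHeap. The helper names below are assumptions
// based on the mutator case and may not match the actual code; see
// g1AllocRegion.cpp for the real definitions. A sketch:
//
//   HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
//                                                       bool force) {
//     return _g1h->new_mutator_alloc_region(word_size, force);  // assumed name
//   }
//   void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
//                                          size_t allocated_bytes) {
//     _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);  // assumed name
//   }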

// Common base class for allocation regions used during GC.
class G1GCAllocRegion : public G1AllocRegion {
protected:
  G1EvacStats* _stats;
  InCSetState::in_cset_state_t _purpose;

  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);

  virtual size_t retire(bool fill_up);
public:
  G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats, InCSetState::in_cset_state_t purpose)
  : G1AllocRegion(name, bot_updates), _stats(stats), _purpose(purpose) {
    assert(stats != NULL, "Must pass non-NULL PLAB statistics");
  }
};

class SurvivorGCAllocRegion : public G1GCAllocRegion {
public:
  SurvivorGCAllocRegion(G1EvacStats* stats)
  : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, InCSetState::Young) { }
};

class OldGCAllocRegion : public G1GCAllocRegion {
public:
  OldGCAllocRegion(G1EvacStats* stats)
  : G1GCAllocRegion("Old GC Alloc Region", true /* bot_updates */, stats, InCSetState::Old) { }

  // This specialization of release() makes sure that the last card that has
  // been allocated into has been completely filled by a dummy object. This
  // avoids races between remembered set scanning, which wants to update the
  // BOT of the last card in the retained old GC alloc region, and allocation
  // threads allocating into that card at the same time.
  virtual HeapRegion* release();
};

class ar_ext_msg : public err_msg {
public:
  ar_ext_msg(G1AllocRegion* alloc_region, const char* message) : err_msg("%s", "") {
    alloc_region->fill_in_ext_msg(this, message);
  }
};

#endif // SHARE_VM_GC_G1_G1ALLOCREGION_HPP