1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "memory/blockOffsetTable.hpp"
  30 #include "memory/threadLocalAllocBuffer.hpp"
  31 #include "utilities/globalDefinitions.hpp"
  32 
  33 // Forward decl.
  34 
  35 class PLABStats;
  36 
  37 // A per-thread allocation buffer used during GC.
  38 class ParGCAllocBuffer: public CHeapObj<mtGC> {
  39 protected:
  40   char head[32];
  41   size_t _word_sz;          // in HeapWord units
  42   HeapWord* _bottom;
  43   HeapWord* _top;
  44   HeapWord* _end;       // last allocatable address + 1
  45   HeapWord* _hard_end;  // _end + AlignmentReserve
  46   bool      _retained;  // whether we hold a _retained_filler
  47   MemRegion _retained_filler;
  48   // In support of ergonomic sizing of PLAB's
  49   size_t    _allocated;     // in HeapWord units
  50   size_t    _wasted;        // in HeapWord units
  51   char tail[32];
  52   static size_t FillerHeaderSize;
  53   static size_t AlignmentReserve;
  54 
  55   // Flush the stats supporting ergonomic sizing of PLAB's
  56   // Should not be called directly
  57   void flush_stats(PLABStats* stats);
  58 
  59 public:
  60   // Initializes the buffer to be empty, but with the given "word_sz".
  61   // Must get initialized with "set_buf" for an allocation to succeed.
  62   ParGCAllocBuffer(size_t word_sz);
  63   virtual ~ParGCAllocBuffer() {}
  64 
  65   static const size_t min_size() {
  66     return ThreadLocalAllocBuffer::min_size();
  67   }
  68 
  69   static const size_t max_size() {
  70     return ThreadLocalAllocBuffer::max_size();
  71   }
  72 
  73   // If an allocation of the given "word_sz" can be satisfied within the
  74   // buffer, do the allocation, returning a pointer to the start of the
  75   // allocated block.  If the allocation request cannot be satisfied,
  76   // return NULL.
  77   HeapWord* allocate(size_t word_sz) {
  78     HeapWord* res = _top;
  79     if (pointer_delta(_end, _top) >= word_sz) {
  80       _top = _top + word_sz;
  81       return res;
  82     } else {
  83       return NULL;
  84     }
  85   }
  86 
  87   // Undo the last allocation in the buffer, which is required to be of the
  88   // "obj" of the given "word_sz".
  89   void undo_allocation(HeapWord* obj, size_t word_sz) {
  90     assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
  91     assert(pointer_delta(_top, obj)     == word_sz, "Bad undo");
  92     _top = obj;
  93   }
  94 
  95   // The total (word) size of the buffer, including both allocated and
  96   // unallocted space.
  97   size_t word_sz() { return _word_sz; }
  98 
  99   // Should only be done if we are about to reset with a new buffer of the
 100   // given size.
 101   void set_word_size(size_t new_word_sz) {
 102     assert(new_word_sz > AlignmentReserve, "Too small");
 103     _word_sz = new_word_sz;
 104   }
 105 
 106   // The number of words of unallocated space remaining in the buffer.
 107   size_t words_remaining() {
 108     assert(_end >= _top, "Negative buffer");
 109     return pointer_delta(_end, _top, HeapWordSize);
 110   }
 111 
 112   bool contains(void* addr) {
 113     return (void*)_bottom <= addr && addr < (void*)_hard_end;
 114   }
 115 
 116   // Sets the space of the buffer to be [buf, space+word_sz()).
 117   virtual void set_buf(HeapWord* buf) {
 118     _bottom   = buf;
 119     _top      = _bottom;
 120     _hard_end = _bottom + word_sz();
 121     _end      = _hard_end - AlignmentReserve;
 122     assert(_end >= _top, "Negative buffer");
 123     // In support of ergonomic sizing
 124     _allocated += word_sz();
 125   }
 126 
 127   // Flush the stats supporting ergonomic sizing of PLAB's
 128   // and retire the current buffer.
 129   void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
 130     // We flush the stats first in order to get a reading of
 131     // unused space in the last buffer.
 132     if (ResizePLAB) {
 133       flush_stats(stats);
 134 
 135       // Since we have flushed the stats we need to clear
 136       // the _allocated and _wasted fields. Not doing so
 137       // will artifically inflate the values in the stats
 138       // to which we add them.
 139       // The next time we flush these values, we will add
 140       // what we have just flushed in addition to the size
 141       // of the buffers allocated between now and then.
 142       _allocated = 0;
 143       _wasted = 0;
 144     }
 145     // Retire the last allocation buffer.
 146     retire(end_of_gc, retain);
 147   }
 148 
 149   // Force future allocations to fail and queries for contains()
 150   // to return false
 151   void invalidate() {
 152     assert(!_retained, "Shouldn't retain an invalidated buffer.");
 153     _end    = _hard_end;
 154     _wasted += pointer_delta(_end, _top);  // unused  space
 155     _top    = _end;      // force future allocations to fail
 156     _bottom = _end;      // force future contains() queries to return false
 157   }
 158 
 159   // Fills in the unallocated portion of the buffer with a garbage object.
 160   // If "end_of_gc" is TRUE, is after the last use in the GC.  IF "retain"
 161   // is true, attempt to re-use the unused portion in the next GC.
 162   virtual void retire(bool end_of_gc, bool retain);
 163 
 164   void print() PRODUCT_RETURN;
 165 };
 166 
// PLAB stats book-keeping.
// Accumulates allocation/waste totals fed in by per-thread allocation
// buffers (see ParGCAllocBuffer::flush_stats) and derives a desired PLAB
// size for the next GC through a decaying weighted average.
class PLABStats VALUE_OBJ_CLASS_SPEC {
  size_t _allocated;      // total allocated
  size_t _wasted;         // of which wasted (internal fragmentation)
  size_t _unused;         // Unused in last buffer
  size_t _used;           // derived = allocated - wasted - unused
  size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
  AdaptiveWeightedAverage
         _filter;         // integrator with decay

 public:
  // "desired_plab_sz_" seeds the initial PLAB size; "wt" is the decay
  // weight handed to the averaging filter.
  PLABStats(size_t desired_plab_sz_, unsigned wt) :
    _allocated(0),
    _wasted(0),
    _unused(0),
    _used(0),
    _desired_plab_sz(desired_plab_sz_),
    _filter(wt)
  { }

  static const size_t min_size() {
    return ParGCAllocBuffer::min_size();
  }

  static const size_t max_size() {
    return ParGCAllocBuffer::max_size();
  }

  // The most recently computed PLAB size recommendation.
  size_t desired_plab_sz() {
    return _desired_plab_sz;
  }

  void adjust_desired_plab_sz(uint no_of_gc_workers);
                                 // filter computation, latches output to
                                 // _desired_plab_sz, clears sensor accumulators

  // The add_* updaters below use atomic adds: multiple GC worker threads
  // may flush their buffer stats into this shared object concurrently.
  void add_allocated(size_t v) {
    Atomic::add_ptr(v, &_allocated);
  }

  void add_unused(size_t v) {
    Atomic::add_ptr(v, &_unused);
  }

  void add_wasted(size_t v) {
    Atomic::add_ptr(v, &_wasted);
  }
};
 215 
 216 class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
 217   BlockOffsetArrayContigSpace _bt;
 218   BlockOffsetSharedArray*     _bsa;
 219   HeapWord*                   _true_end;  // end of the whole ParGCAllocBuffer
 220 
 221   static const size_t ChunkSizeInWords;
 222   static const size_t ChunkSizeInBytes;
 223   HeapWord* allocate_slow(size_t word_sz);
 224 
 225   void fill_region_with_block(MemRegion mr, bool contig);
 226 
 227 public:
 228   ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
 229 
 230   HeapWord* allocate(size_t word_sz) {
 231     HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
 232     if (res != NULL) {
 233       _bt.alloc_block(res, word_sz);
 234     } else {
 235       res = allocate_slow(word_sz);
 236     }
 237     return res;
 238   }
 239 
 240   void undo_allocation(HeapWord* obj, size_t word_sz);
 241 
 242   virtual void set_buf(HeapWord* buf_start) {
 243     ParGCAllocBuffer::set_buf(buf_start);
 244     _true_end = _hard_end;
 245     _bt.set_region(MemRegion(buf_start, word_sz()));
 246     _bt.initialize_threshold();
 247   }
 248 
 249   virtual void retire(bool end_of_gc, bool retain);
 250 
 251   MemRegion range() {
 252     return MemRegion(_top, _true_end);
 253   }
 254 };
 255 
 256 #endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP