src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp

Print this page
rev 6251 : 8028710: G1 does not retire allocation buffers after reference processing work
Summary: G1 does not retire allocation buffers after reference processing work when -XX:+ParallelRefProcEnabled is enabled.
Reviewed-by:


  43   HeapWord* _top;
  44   HeapWord* _end;       // last allocatable address + 1
  45   HeapWord* _hard_end;  // _end + AlignmentReserve
  46   bool      _retained;  // whether we hold a _retained_filler
  47   MemRegion _retained_filler;
  48   // In support of ergonomic sizing of PLAB's
  49   size_t    _allocated;     // in HeapWord units
  50   size_t    _wasted;        // in HeapWord units
  51   char tail[32];
  52   static size_t FillerHeaderSize;
  53   static size_t AlignmentReserve;
  54 
  55   // Flush the stats supporting ergonomic sizing of PLAB's
  56   // Should not be called directly
  57   void flush_stats(PLABStats* stats);
  58 
  59 public:
  60   // Initializes the buffer to be empty, but with the given "word_sz".
  61   // Must get initialized with "set_buf" for an allocation to succeed.
  62   ParGCAllocBuffer(size_t word_sz);

  63 
  64   static const size_t min_size() {  // NOTE(review): 'const' on a by-value return type is ignored by the compiler
  65     return ThreadLocalAllocBuffer::min_size();  // delegate the PLAB lower bound to the shared TLAB sizing policy
  66   }
  67 
  68   static const size_t max_size() {  // NOTE(review): 'const' on a by-value return type is ignored by the compiler
  69     return ThreadLocalAllocBuffer::max_size();  // delegate the PLAB upper bound to the shared TLAB sizing policy
  70   }
  71 
  72   // If an allocation of the given "word_sz" can be satisfied within the
  73   // buffer, do the allocation, returning a pointer to the start of the
  74   // allocated block.  If the allocation request cannot be satisfied,
  75   // return NULL.
  76   HeapWord* allocate(size_t word_sz) {
  77     HeapWord* res = _top;
  78     if (pointer_delta(_end, _top) >= word_sz) {
  79       _top = _top + word_sz;
  80       return res;
  81     } else {
  82       return NULL;


  96   size_t word_sz() { return _word_sz; }
  97 
  98   // Should only be done if we are about to reset with a new buffer of the
  99   // given size.
  100   void set_word_size(size_t new_word_sz) {
  101     assert(new_word_sz > AlignmentReserve, "Too small");  // must leave room for the alignment reserve at the end
  102     _word_sz = new_word_sz;  // takes effect the next time set_buf() lays out a buffer
  103   }
 104 
 105   // The number of words of unallocated space remaining in the buffer.
  106   size_t words_remaining() {
  107     assert(_end >= _top, "Negative buffer");
  108     return pointer_delta(_end, _top, HeapWordSize);  // free space between the bump pointer and _end, in HeapWords
  109   }
 110 
  111   bool contains(void* addr) {
  112     return (void*)_bottom <= addr && addr < (void*)_hard_end;  // half-open interval [_bottom, _hard_end)
  113   }
 114 
  115   // Sets the space of the buffer to be [buf, buf+word_sz()).
  116   void set_buf(HeapWord* buf) {
  117     _bottom   = buf;
  118     _top      = _bottom;  // buffer starts empty; allocate() bumps _top
  119     _hard_end = _bottom + word_sz();
  120     _end      = _hard_end - AlignmentReserve;  // hold back AlignmentReserve words below _hard_end
  121     assert(_end >= _top, "Negative buffer");
  122     // In support of ergonomic sizing
  123     _allocated += word_sz();
  124   }
 125 
 126   // Flush the stats supporting ergonomic sizing of PLAB's
 127   // and retire the current buffer.
 128   void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
 129     // We flush the stats first in order to get a reading of
 130     // unused space in the last buffer.
 131     if (ResizePLAB) {
 132       flush_stats(stats);
 133 
 134       // Since we have flushed the stats we need to clear
 135       // the _allocated and _wasted fields. Not doing so
  136       // will artificially inflate the values in the stats


 141       _allocated = 0;
 142       _wasted = 0;
 143     }
 144     // Retire the last allocation buffer.
 145     retire(end_of_gc, retain);
 146   }
 147 
 148   // Force future allocations to fail and queries for contains()
 149   // to return false
  150   void invalidate() {
  151     assert(!_retained, "Shouldn't retain an invalidated buffer.");
  152     _end    = _hard_end;  // must precede the _wasted update so the reserve is counted too
  153     _wasted += pointer_delta(_end, _top);  // unused space
  154     _top    = _end;      // force future allocations to fail
  155     _bottom = _end;      // force future contains() queries to return false
  156   }
 157 
 158   // Fills in the unallocated portion of the buffer with a garbage object.
  159   // If "end_of_gc" is TRUE, this is after the last use in the GC.  If "retain"
 160   // is true, attempt to re-use the unused portion in the next GC.
 161   void retire(bool end_of_gc, bool retain);
 162 
 163   void print() PRODUCT_RETURN;
 164 };
 165 
 166 // PLAB stats book-keeping
 167 class PLABStats VALUE_OBJ_CLASS_SPEC {
 168   size_t _allocated;      // total allocated
 169   size_t _wasted;         // of which wasted (internal fragmentation)
 170   size_t _unused;         // Unused in last buffer
 171   size_t _used;           // derived = allocated - wasted - unused
 172   size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
 173   AdaptiveWeightedAverage
 174          _filter;         // integrator with decay
 175 
 176  public:
 177   PLABStats(size_t desired_plab_sz_, unsigned wt) :
 178     _allocated(0),
 179     _wasted(0),
 180     _unused(0),
 181     _used(0),


 221   static const size_t ChunkSizeInBytes;
 222   HeapWord* allocate_slow(size_t word_sz);
 223 
 224   void fill_region_with_block(MemRegion mr, bool contig);
 225 
 226 public:
 227   ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
 228 
  229   HeapWord* allocate(size_t word_sz) {
  230     HeapWord* res = ParGCAllocBuffer::allocate(word_sz);  // fast path: bump-pointer allocation in the base buffer
  231     if (res != NULL) {
  232       _bt.alloc_block(res, word_sz);  // record the new block in the block offset table
  233     } else {
  234       res = allocate_slow(word_sz);  // fall back to the slow path (may still return NULL)
  235     }
  236     return res;
  237   }
 238 
 239   void undo_allocation(HeapWord* obj, size_t word_sz);
 240 
  241   void set_buf(HeapWord* buf_start) {
  242     ParGCAllocBuffer::set_buf(buf_start);  // lay out _bottom/_top/_end/_hard_end as in the base class
  243     _true_end = _hard_end;  // snapshot the hard end as the buffer's true end
  244     _bt.set_region(MemRegion(buf_start, word_sz()));  // point the BOT at the new buffer's region
  245     _bt.initialize_threshold();
  246   }
 247 
 248   void retire(bool end_of_gc, bool retain);
 249 
  250   MemRegion range() {
  251     return MemRegion(_top, _true_end);  // the span from the current allocation point to the true end
  252   }
 253 };
 254 
 255 #endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP


  43   HeapWord* _top;
  44   HeapWord* _end;       // last allocatable address + 1
  45   HeapWord* _hard_end;  // _end + AlignmentReserve
  46   bool      _retained;  // whether we hold a _retained_filler
  47   MemRegion _retained_filler;
  48   // In support of ergonomic sizing of PLAB's
  49   size_t    _allocated;     // in HeapWord units
  50   size_t    _wasted;        // in HeapWord units
  51   char tail[32];
  52   static size_t FillerHeaderSize;
  53   static size_t AlignmentReserve;
  54 
  55   // Flush the stats supporting ergonomic sizing of PLAB's
  56   // Should not be called directly
  57   void flush_stats(PLABStats* stats);
  58 
  59 public:
  60   // Initializes the buffer to be empty, but with the given "word_sz".
  61   // Must get initialized with "set_buf" for an allocation to succeed.
  62   ParGCAllocBuffer(size_t word_sz);
  63   virtual ~ParGCAllocBuffer() {}
  64 
  65   static const size_t min_size() {  // NOTE(review): 'const' on a by-value return type is ignored by the compiler
  66     return ThreadLocalAllocBuffer::min_size();  // delegate the PLAB lower bound to the shared TLAB sizing policy
  67   }
  68 
  69   static const size_t max_size() {  // NOTE(review): 'const' on a by-value return type is ignored by the compiler
  70     return ThreadLocalAllocBuffer::max_size();  // delegate the PLAB upper bound to the shared TLAB sizing policy
  71   }
  72 
  73   // If an allocation of the given "word_sz" can be satisfied within the
  74   // buffer, do the allocation, returning a pointer to the start of the
  75   // allocated block.  If the allocation request cannot be satisfied,
  76   // return NULL.
  77   HeapWord* allocate(size_t word_sz) {
  78     HeapWord* res = _top;
  79     if (pointer_delta(_end, _top) >= word_sz) {
  80       _top = _top + word_sz;
  81       return res;
  82     } else {
  83       return NULL;


  97   size_t word_sz() { return _word_sz; }
  98 
  99   // Should only be done if we are about to reset with a new buffer of the
 100   // given size.
  101   void set_word_size(size_t new_word_sz) {
  102     assert(new_word_sz > AlignmentReserve, "Too small");  // must leave room for the alignment reserve at the end
  103     _word_sz = new_word_sz;  // takes effect the next time set_buf() lays out a buffer
  104   }
 105 
 106   // The number of words of unallocated space remaining in the buffer.
  107   size_t words_remaining() {
  108     assert(_end >= _top, "Negative buffer");
  109     return pointer_delta(_end, _top, HeapWordSize);  // free space between the bump pointer and _end, in HeapWords
  110   }
 111 
  112   bool contains(void* addr) {
  113     return (void*)_bottom <= addr && addr < (void*)_hard_end;  // half-open interval [_bottom, _hard_end)
  114   }
 115 
  116   // Sets the space of the buffer to be [buf, buf+word_sz()).
  117   virtual void set_buf(HeapWord* buf) {
  118     _bottom   = buf;
  119     _top      = _bottom;  // buffer starts empty; allocate() bumps _top
  120     _hard_end = _bottom + word_sz();
  121     _end      = _hard_end - AlignmentReserve;  // hold back AlignmentReserve words below _hard_end
  122     assert(_end >= _top, "Negative buffer");
  123     // In support of ergonomic sizing
  124     _allocated += word_sz();
  125   }
 126 
 127   // Flush the stats supporting ergonomic sizing of PLAB's
 128   // and retire the current buffer.
 129   void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
 130     // We flush the stats first in order to get a reading of
 131     // unused space in the last buffer.
 132     if (ResizePLAB) {
 133       flush_stats(stats);
 134 
 135       // Since we have flushed the stats we need to clear
 136       // the _allocated and _wasted fields. Not doing so
  137       // will artificially inflate the values in the stats


 142       _allocated = 0;
 143       _wasted = 0;
 144     }
 145     // Retire the last allocation buffer.
 146     retire(end_of_gc, retain);
 147   }
 148 
 149   // Force future allocations to fail and queries for contains()
 150   // to return false
  151   void invalidate() {
  152     assert(!_retained, "Shouldn't retain an invalidated buffer.");
  153     _end    = _hard_end;  // must precede the _wasted update so the reserve is counted too
  154     _wasted += pointer_delta(_end, _top);  // unused space
  155     _top    = _end;      // force future allocations to fail
  156     _bottom = _end;      // force future contains() queries to return false
  157   }
 158 
 159   // Fills in the unallocated portion of the buffer with a garbage object.
  160   // If "end_of_gc" is TRUE, this is after the last use in the GC.  If "retain"
 161   // is true, attempt to re-use the unused portion in the next GC.
 162   virtual void retire(bool end_of_gc, bool retain);
 163 
 164   void print() PRODUCT_RETURN;
 165 };
 166 
 167 // PLAB stats book-keeping
 168 class PLABStats VALUE_OBJ_CLASS_SPEC {
 169   size_t _allocated;      // total allocated
 170   size_t _wasted;         // of which wasted (internal fragmentation)
 171   size_t _unused;         // Unused in last buffer
 172   size_t _used;           // derived = allocated - wasted - unused
 173   size_t _desired_plab_sz;// output of filter (below), suitably trimmed and quantized
 174   AdaptiveWeightedAverage
 175          _filter;         // integrator with decay
 176 
 177  public:
 178   PLABStats(size_t desired_plab_sz_, unsigned wt) :
 179     _allocated(0),
 180     _wasted(0),
 181     _unused(0),
 182     _used(0),


 222   static const size_t ChunkSizeInBytes;
 223   HeapWord* allocate_slow(size_t word_sz);
 224 
 225   void fill_region_with_block(MemRegion mr, bool contig);
 226 
 227 public:
 228   ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
 229 
  230   HeapWord* allocate(size_t word_sz) {
  231     HeapWord* res = ParGCAllocBuffer::allocate(word_sz);  // fast path: bump-pointer allocation in the base buffer
  232     if (res != NULL) {
  233       _bt.alloc_block(res, word_sz);  // record the new block in the block offset table
  234     } else {
  235       res = allocate_slow(word_sz);  // fall back to the slow path (may still return NULL)
  236     }
  237     return res;
  238   }
 239 
 240   void undo_allocation(HeapWord* obj, size_t word_sz);
 241 
  242   virtual void set_buf(HeapWord* buf_start) {
  243     ParGCAllocBuffer::set_buf(buf_start);  // lay out _bottom/_top/_end/_hard_end as in the base class
  244     _true_end = _hard_end;  // snapshot the hard end as the buffer's true end
  245     _bt.set_region(MemRegion(buf_start, word_sz()));  // point the BOT at the new buffer's region
  246     _bt.initialize_threshold();
  247   }
 248 
 249   virtual void retire(bool end_of_gc, bool retain);
 250 
  251   MemRegion range() {
  252     return MemRegion(_top, _true_end);  // the span from the current allocation point to the true end
  253   }
 254 };
 255 
 256 #endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP