/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parGCAllocBuffer.cpp.incl"

ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  assert(min_size() > AlignmentReserve, "Inconsistency!");
  // arrayOopDesc::header_size depends on command line initialization.
  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
}

size_t ParGCAllocBuffer::FillerHeaderSize;

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object.  We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
size_t ParGCAllocBuffer::AlignmentReserve;
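
// A worked example of the reserve computation above (the word sizes are
// illustrative assumptions, not guarantees for any particular platform):
// if oopDesc::header_size() is 2 words and MinObjAlignment is 1 word, the
// header overhangs the alignment grain, so AlignmentReserve becomes
// FillerHeaderSize, i.e. align_object_size applied to the header size of
// an int array. Reserving that many words at the end of every buffer
// guarantees retire() can always plant a parseable filler array there.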

void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained, shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    _wasted += _retained_filler.word_size();
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    CollectedHeap::fill_with_object(_top, _hard_end);
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}
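
// Calling convention implied by the assert above: retire(true, true) at
// the end of a scavenge retains the buffer (behind a filler header) for
// the next GC; retire(true, false) gives the buffer up entirely; and
// retain == true with end_of_gc == false is disallowed.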

void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
  assert(ResizePLAB, "Wasted work");
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_unused(pointer_delta(_end, _top));
}

// Compute the desired PLAB size and latch the result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  assert(ResizePLAB, "Not set");
  if (_allocated == 0) {
    assert(_unused == 0, "Inconsistency in PLAB stats");
    _allocated = 1;
  }
  double wasted_frac    = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
  if (ResizePLAB) {
    _desired_plab_sz = plab_sz;
  }
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}
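
// A worked example of the sizing math above, with purely hypothetical
// inputs: _unused = 100 and _allocated = 1000 give wasted_frac = 0.1;
// with TargetSurvivorRatio = 50 and TargetPLABWastePct = 10,
// target_refills = (size_t)((0.1 * 50) / 10) = 0, which is clamped to 1.
// If _wasted = 50 then _used = 1000 - 50 - 100 = 850, and with
// ParallelGCThreads = 4 the raw plab_sz is 850 / (1 * 4) = 212 words,
// before the exponential filter, clipping, and object alignment apply.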

#ifndef PRODUCT
void ParGCAllocBuffer::print() {
  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p  _top: %p  _end: %p  _hard_end: %p "
             "_retained: %c _retained_filler: [%p,%p)\n",
             _bottom, _top, _end, _hard_end,
             "FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT

const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
       ((size_t)Generation::GenGrain)/HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
  MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
       (size_t)Generation::GenGrain);
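
// The two constants above express the same chunk size in words and in
// bytes (GenGrain is a power-of-two multiple of the word size, so the
// MIN2 commutes with the unit conversion). The chunk size bounds how far
// allocate_slow() below advances _hard_end on each refill of a retained
// buffer.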

ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}

// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}
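
// Usage in this file: allocate_slow() and the first-card surgery in
// retire() pass contig == true (the filled region abuts the current
// allocation point, so the BOT threshold should advance normally);
// the last-card and retained-filler surgery pass contig == false,
// updating the shared array directly without touching _next_threshold,
// as described in the comment above.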

HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top      = _retained_filler.end();
    _hard_end = next_hard_end;
    _end      = _hard_end - AlignmentReserve;
    res       = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}
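
// Note on the refill above: each slow-path refill advances _hard_end by
// at most ChunkSizeInWords (never past _true_end) and plants a fresh
// retained filler at the old _hard_end, so the buffer always parses as
// a sequence of objects for any card scanner walking it concurrently.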

void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}

void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // We're about to make the retained_filler into a block.
    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
                                      _retained_filler.end());
  }
  // Reset _hard_end to _true_end (and update _end)
  if (retain && _hard_end != NULL) {
    assert(_hard_end <= _true_end, "Invariant.");
    _hard_end = _true_end;
    _end      = MAX2(_top, _hard_end - AlignmentReserve);
    assert(_end <= _hard_end, "Invariant.");
  }
  _true_end = _hard_end;
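  // The chunk-by-chunk headroom tracked by _true_end has now either been
  // folded back into _hard_end (when retaining, above) or is being given
  // up; from here on the generic retire() logic sees a buffer that simply
  // ends at _hard_end.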
  HeapWord* pre_top = _top;

  ParGCAllocBuffer::retire(end_of_gc, retain);
  // Now any old _retained_filler is cut back to size, the free part is
  // filled with a filler object, and top is past the header of that
  // object.

  if (retain && _top < _end) {
    assert(end_of_gc && retain, "Or else retain should be false.");
    // If the lab does not start on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the first card with a garbage object.
    size_t first_card_index = _bsa->index_for(pre_top);
    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
    if (first_card_start < pre_top) {
      HeapWord* second_card_start =
        _bsa->inc_by_region_size(first_card_start);

      // Ensure enough room to fill with the smallest block
      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);

      // If the end is already in the first card, don't go beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_hard_end < second_card_start ||
          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
        second_card_start = _hard_end;
      }
      if (pre_top < second_card_start) {
        MemRegion first_card_suffix(pre_top, second_card_start);
        fill_region_with_block(first_card_suffix, true);
      }
      pre_top = second_card_start;
      _top = pre_top;
      _end = MAX2(_top, _hard_end - AlignmentReserve);
    }

    // If the lab does not end on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support.  So we fill
    // the last card with a garbage object.
    size_t last_card_index = _bsa->index_for(_hard_end);
    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
    if (last_card_start < _hard_end) {

      // Ensure enough room to fill with the smallest block
      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);

      // If the top is already in the last card, don't go back beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_top > last_card_start ||
          pointer_delta(last_card_start, _top) < AlignmentReserve) {
        last_card_start = _top;
      }
      if (last_card_start < _hard_end) {
        MemRegion last_card_prefix(last_card_start, _hard_end);
        fill_region_with_block(last_card_prefix, false);
      }
      _hard_end = last_card_start;
      _end      = MAX2(_top, _hard_end - AlignmentReserve);
      _true_end = _hard_end;
      assert(_end <= _hard_end, "Invariant.");
    }

    // At this point:
    //   1) we had a filler object from the original top to hard_end.
    //   2) We've filled in any partial cards at the front and back.
    if (pre_top < _hard_end) {
      // Now we can reset the _bt to do allocation in the given area.
      MemRegion new_filler(pre_top, _hard_end);
      fill_region_with_block(new_filler, false);
      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
      // If there's no space left, don't retain.
      if (_top >= _end) {
        _retained = false;
        invalidate();
        return;
      }
      _retained_filler = MemRegion(pre_top, _top);
      _bt.set_region(MemRegion(_top, _hard_end));
      _bt.initialize_threshold();
      assert(_bt.threshold() > _top, "initialize_threshold failed!");

      // There may be other reasons for queries into the middle of the
      // filler object.  When such queries are done in parallel with
      // allocation, bad things can happen if the query involves object
      // iteration.  So we ensure that such queries do not involve object
      // iteration, by putting another filler object on the boundaries of
      // such queries.  One such is the object spanning a parallel card
      // chunk boundary.

      // "chunk_boundary" is the largest chunk boundary strictly below
      // "hard_end".
      HeapWord* chunk_boundary =
        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
      assert(chunk_boundary < _hard_end, "Or else above did not work.");
      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");

      if (_top <= chunk_boundary) {
        assert(_true_end == _hard_end, "Invariant.");
        while (_top <= chunk_boundary) {
          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                 "Consequence of last card handling above.");
          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
          _hard_end = chunk_boundary;
          chunk_boundary -= ChunkSizeInWords;
        }
        _end = _hard_end - AlignmentReserve;
        assert(_top <= _end, "Invariant.");
        // Now reset the initial filler chunk so it doesn't overlap with
        // the one(s) inserted above.
        MemRegion new_filler(pre_top, _hard_end);
        fill_region_with_block(new_filler, false);
      }
    } else {
      _retained = false;
      invalidate();
    }
  } else {
    assert(!end_of_gc ||
           (!_retained && _true_end == _hard_end), "Checking.");
  }
  assert(_end <= _hard_end, "Invariant.");
  assert(_top < _end || _top == _hard_end, "Invariant.");
}