
src/hotspot/share/gc/shared/plab.cpp

rev 48920 : [backport] Use PLAB for evacuations instead of TLAB
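
This change teaches the shared PLAB (promotion local allocation buffer) code to reserve and initialize any extra per-object heap words the collector needs (presumably a forwarding word, given the oop_extra_words() / tlab_post_allocation_setup() hooks used below), so that retired buffers and undone allocations still parse as valid filler objects. The original file is shown first, followed by the patched version. For orientation, here is a minimal sketch of how a GC worker drives this API during evacuation; desired_plab_sz, buf, word_sz, the copy step and the lost_race condition are illustrative placeholders, not part of the patch:

// Illustrative evacuation fragment; desired_plab_sz, buf, word_sz and lost_race
// are placeholders, and the exact set_buf() signature may differ by JDK version.
PLAB plab(desired_plab_sz);               // per-worker buffer, typically sized between
                                          //   PLAB::min_size() and PLAB::max_size()
plab.set_buf(buf, desired_plab_sz);       // hand the PLAB a freshly allocated heap chunk
HeapWord* copy = plab.allocate(word_sz);  // carve copy space out of the buffer
if (copy != NULL) {
  // ... copy the object here and race to install the forwarding pointer ...
  if (lost_race) {
    plab.undo_allocation(copy, word_sz);  // roll the space back, or record it as undo waste
  }
}
// When the worker is done with the buffer, the unused tail is turned into a
// filler object so the heap stays walkable.
plab.retire();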


  27 #include "gc/shared/plab.inline.hpp"
  28 #include "gc/shared/threadLocalAllocBuffer.hpp"
  29 #include "logging/log.hpp"
  30 #include "oops/arrayOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 
  33 size_t PLAB::min_size() {
  34   // Make sure that we return something that is larger than AlignmentReserve
  35   return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
  36 }
  37 
  38 size_t PLAB::max_size() {
  39   return ThreadLocalAllocBuffer::max_size();
  40 }
  41 
  42 PLAB::PLAB(size_t desired_plab_sz_) :
  43   _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  44   _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
  45 {
  46   // ArrayOopDesc::header_size depends on command line initialization.
  47   AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
  48   assert(min_size() > AlignmentReserve,
  49          "Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
  50          "to be able to contain objects", min_size(), AlignmentReserve);
  51 }
  52 
  53 // If the minimum object size is greater than MinObjAlignment, we can
  54 // end up with a shard at the end of the buffer that's smaller than
  55 // the smallest object.  We can't allow that because the buffer must
  56 // look like it's full of objects when we retire it, so we make
  57 // sure we have enough space for a filler int array object.
  58 size_t PLAB::AlignmentReserve;
  59 
  60 void PLAB::flush_and_retire_stats(PLABStats* stats) {
  61   // Retire the last allocation buffer.
  62   size_t unused = retire_internal();
  63 
  64   // Now flush the statistics.
  65   stats->add_allocated(_allocated);
  66   stats->add_wasted(_wasted);
  67   stats->add_undo_wasted(_undo_wasted);
  68   stats->add_unused(unused);
  69 
  70   // Since we have flushed the stats we need to clear the _allocated and _wasted
  71   // fields in case somebody retains an instance of this over GCs. Not doing so
  72   // will artificially inflate the values in the statistics.
  73   _allocated   = 0;
  74   _wasted      = 0;
  75   _undo_wasted = 0;
  76 }
  77 
  78 void PLAB::retire() {
  79   _wasted += retire_internal();
  80 }
  81 
  82 size_t PLAB::retire_internal() {
  83   size_t result = 0;
  84   if (_top < _hard_end) {
  85     CollectedHeap::fill_with_object(_top, _hard_end);
  86     result += invalidate();
  87   }
  88   return result;
  89 }
  90 
  91 void PLAB::add_undo_waste(HeapWord* obj, size_t word_sz) {
  92   CollectedHeap::fill_with_object(obj, word_sz);
  93   _undo_wasted += word_sz;
  94 }
  95 
  96 void PLAB::undo_last_allocation(HeapWord* obj, size_t word_sz) {
  97   assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
  98   assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
  99   _top = obj;
 100 }
 101 
 102 void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
 103   // Is the alloc in the current alloc buffer?
 104   if (contains(obj)) {
 105     assert(contains(obj + word_sz - 1),
 106       "should contain whole object");
 107     undo_last_allocation(obj, word_sz);
 108   } else {
 109     add_undo_waste(obj, word_sz);
 110   }
 111 }
 112 
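
A usage note on the undo path above: undo_allocation() can genuinely return memory only for the most recent allocation(s) from the current buffer, in LIFO order; anything else, in particular memory from a buffer that has already been retired, goes through add_undo_waste(), is covered with a dummy object and is merely counted as undo waste. A small hedged illustration, continuing from a PLAB 'plab' with enough free space (the word sizes are arbitrary):

// Hedged illustration; 'plab' and the word sizes are arbitrary placeholders.
HeapWord* a = plab.allocate(10);
HeapWord* b = plab.allocate(20);
plab.undo_allocation(b, 20);   // b was the last allocation: _top rolls back to b
plab.undo_allocation(a, 10);   // a is now the last one again, so this rolls back too
// Undoing a before b would trip the "Bad undo" assert; memory outside the
// current buffer is filled via add_undo_waste() and only accounted, not reused.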


Patched version of plab.cpp (after rev 48920):

  27 #include "gc/shared/plab.inline.hpp"
  28 #include "gc/shared/threadLocalAllocBuffer.hpp"
  29 #include "logging/log.hpp"
  30 #include "oops/arrayOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 
  33 size_t PLAB::min_size() {
  34   // Make sure that we return something that is larger than AlignmentReserve
  35   return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
  36 }
  37 
  38 size_t PLAB::max_size() {
  39   return ThreadLocalAllocBuffer::max_size();
  40 }
  41 
  42 PLAB::PLAB(size_t desired_plab_sz_) :
  43   _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  44   _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
  45 {
  46   // ArrayOopDesc::header_size depends on command line initialization.
  47   int rsv_regular = oopDesc::header_size() + (int) Universe::heap()->oop_extra_words();
  48   int rsv_array   = align_object_size(arrayOopDesc::header_size(T_INT) + Universe::heap()->oop_extra_words());
  49   AlignmentReserve = rsv_regular > MinObjAlignment ? rsv_array : 0;
  50   assert(min_size() > AlignmentReserve,
  51          "Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
  52          "to be able to contain objects", min_size(), AlignmentReserve);
  53 }
  54 
  55 // If the minimum object size is greater than MinObjAlignment, we can
  56 // end up with a shard at the end of the buffer that's smaller than
  57 // the smallest object.  We can't allow that because the buffer must
  58 // look like it's full of objects when we retire it, so we make
  59 // sure we have enough space for a filler int array object.
  60 size_t PLAB::AlignmentReserve;
  61 
  62 void PLAB::flush_and_retire_stats(PLABStats* stats) {
  63   // Retire the last allocation buffer.
  64   size_t unused = retire_internal();
  65 
  66   // Now flush the statistics.
  67   stats->add_allocated(_allocated);
  68   stats->add_wasted(_wasted);
  69   stats->add_undo_wasted(_undo_wasted);
  70   stats->add_unused(unused);
  71 
  72   // Since we have flushed the stats we need to clear the _allocated and _wasted
  73   // fields in case somebody retains an instance of this over GCs. Not doing so
  74   // will artificially inflate the values in the statistics.
  75   _allocated   = 0;
  76   _wasted      = 0;
  77   _undo_wasted = 0;
  78 }
  79 
  80 void PLAB::retire() {
  81   _wasted += retire_internal();
  82 }
  83 
  84 size_t PLAB::retire_internal() {
  85   size_t result = 0;
  86   if (_top < _hard_end) {
  87     assert(pointer_delta(_hard_end, _top) >= (size_t)(oopDesc::header_size() + Universe::heap()->oop_extra_words()),
  88            "better have enough space left to fill with dummy");
  89     HeapWord* obj = Universe::heap()->tlab_post_allocation_setup(_top);
  90     CollectedHeap::fill_with_object(obj, _hard_end);
  91     result += invalidate();
  92   }
  93   return result;
  94 }
  95 
  96 void PLAB::add_undo_waste(HeapWord* obj, size_t word_sz) {
  97   HeapWord* head_obj = Universe::heap()->tlab_post_allocation_setup(obj);
  98   CollectedHeap::fill_with_object(head_obj, word_sz - (head_obj - obj));
  99   _undo_wasted += word_sz;
 100 }
 101 
 102 void PLAB::undo_last_allocation(HeapWord* obj, size_t word_sz) {
 103   assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
 104   assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
 105   _top = obj;
 106 }
 107 
 108 void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
 109   // Is the alloc in the current alloc buffer?
 110   if (contains(obj)) {
 111     assert(contains(obj + word_sz - 1),
 112       "should contain whole object");
 113     undo_last_allocation(obj, word_sz);
 114   } else {
 115     add_undo_waste(obj, word_sz);
 116   }
 117 }
 118 
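
Finally, a hedged sketch of how flush_and_retire_stats() above is typically driven at the end of a pause; the flush_worker_plabs() wrapper, the worker_plabs array and num_workers are placeholders, while PLAB and PLABStats are the types from gc/shared/plab.hpp:

// Hedged sketch; flush_worker_plabs, worker_plabs and num_workers are placeholders.
void flush_worker_plabs(PLAB** worker_plabs, uint num_workers, PLABStats* stats) {
  for (uint i = 0; i < num_workers; i++) {
    // Fill the unused tail of each buffer, fold its allocated/wasted/undo-wasted/
    // unused counters into the shared stats, and zero the per-PLAB counters so a
    // PLAB that is retained across GCs does not inflate the next cycle's numbers.
    worker_plabs[i]->flush_and_retire_stats(stats);
  }
}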

