28 #include "gc/shared/threadLocalAllocBuffer.hpp"
29 #include "oops/arrayOop.hpp"
30 #include "oops/oop.inline.hpp"
31
32 size_t PLAB::min_size() {
33 // Make sure that we return something that is larger than AlignmentReserve
34 return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
35 }
36
37 size_t PLAB::max_size() {
38 return ThreadLocalAllocBuffer::max_size();
39 }
40
41 PLAB::PLAB(size_t desired_plab_sz_) :
42 _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
43 _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
44 {
45 // ArrayOopDesc::header_size depends on command line initialization.
46 AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
47 assert(min_size() > AlignmentReserve,
48 err_msg("Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
49 "to be able to contain objects", min_size(), AlignmentReserve));
50 }
51
// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
// Storage for the static member; its value is computed in the PLAB
// constructor, since it depends on command-line initialization.
size_t PLAB::AlignmentReserve;
58
59 void PLAB::flush_and_retire_stats(PLABStats* stats) {
60 // Retire the last allocation buffer.
61 size_t unused = retire_internal();
62
63 // Now flush the statistics.
64 stats->add_allocated(_allocated);
65 stats->add_wasted(_wasted);
66 stats->add_undo_wasted(_undo_wasted);
67 stats->add_unused(unused);
68
69 // Since we have flushed the stats we need to clear the _allocated and _wasted
108 add_undo_waste(obj, word_sz);
109 }
110 }
111
112 // Calculates plab size for current number of gc worker threads.
113 size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
114 return MAX2(min_size(), (size_t)align_object_size(_desired_net_plab_sz / no_of_gc_workers));
115 }
116
117 // Compute desired plab size for one gc worker thread and latch result for later
118 // use. This should be called once at the end of parallel
119 // scavenge; it clears the sensor accumulators.
120 void PLABStats::adjust_desired_plab_sz() {
121 assert(ResizePLAB, "Not set");
122
123 assert(is_object_aligned(max_size()) && min_size() <= max_size(),
124 "PLAB clipping computation may be incorrect");
125
126 if (_allocated == 0) {
127 assert(_unused == 0,
128 err_msg("Inconsistency in PLAB stats: "
129 "_allocated: " SIZE_FORMAT ", "
130 "_wasted: " SIZE_FORMAT ", "
131 "_unused: " SIZE_FORMAT ", "
132 "_undo_wasted: " SIZE_FORMAT,
133 _allocated, _wasted, _unused, _undo_wasted));
134
135 _allocated = 1;
136 }
137 double wasted_frac = (double)_unused / (double)_allocated;
138 size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
139 if (target_refills == 0) {
140 target_refills = 1;
141 }
142 size_t used = _allocated - _wasted - _unused;
143 // Assumed to have 1 gc worker thread
144 size_t recent_plab_sz = used / target_refills;
145 // Take historical weighted average
146 _filter.sample(recent_plab_sz);
147 // Clip from above and below, and align to object boundary
148 size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
149 new_plab_sz = MIN2(max_size(), new_plab_sz);
150 new_plab_sz = align_object_size(new_plab_sz);
151 // Latch the result
152 if (PrintPLAB) {
153 gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " desired_net_plab_sz = " SIZE_FORMAT ") ", recent_plab_sz, new_plab_sz);
|
28 #include "gc/shared/threadLocalAllocBuffer.hpp"
29 #include "oops/arrayOop.hpp"
30 #include "oops/oop.inline.hpp"
31
32 size_t PLAB::min_size() {
33 // Make sure that we return something that is larger than AlignmentReserve
34 return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
35 }
36
37 size_t PLAB::max_size() {
38 return ThreadLocalAllocBuffer::max_size();
39 }
40
41 PLAB::PLAB(size_t desired_plab_sz_) :
42 _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
43 _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
44 {
45 // ArrayOopDesc::header_size depends on command line initialization.
46 AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
47 assert(min_size() > AlignmentReserve,
48 "Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
49 "to be able to contain objects", min_size(), AlignmentReserve);
50 }
51
// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
// Storage for the static member; its value is computed in the PLAB
// constructor, since it depends on command-line initialization.
size_t PLAB::AlignmentReserve;
58
59 void PLAB::flush_and_retire_stats(PLABStats* stats) {
60 // Retire the last allocation buffer.
61 size_t unused = retire_internal();
62
63 // Now flush the statistics.
64 stats->add_allocated(_allocated);
65 stats->add_wasted(_wasted);
66 stats->add_undo_wasted(_undo_wasted);
67 stats->add_unused(unused);
68
69 // Since we have flushed the stats we need to clear the _allocated and _wasted
108 add_undo_waste(obj, word_sz);
109 }
110 }
111
112 // Calculates plab size for current number of gc worker threads.
113 size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
114 return MAX2(min_size(), (size_t)align_object_size(_desired_net_plab_sz / no_of_gc_workers));
115 }
116
117 // Compute desired plab size for one gc worker thread and latch result for later
118 // use. This should be called once at the end of parallel
119 // scavenge; it clears the sensor accumulators.
120 void PLABStats::adjust_desired_plab_sz() {
121 assert(ResizePLAB, "Not set");
122
123 assert(is_object_aligned(max_size()) && min_size() <= max_size(),
124 "PLAB clipping computation may be incorrect");
125
126 if (_allocated == 0) {
127 assert(_unused == 0,
128 "Inconsistency in PLAB stats: "
129 "_allocated: " SIZE_FORMAT ", "
130 "_wasted: " SIZE_FORMAT ", "
131 "_unused: " SIZE_FORMAT ", "
132 "_undo_wasted: " SIZE_FORMAT,
133 _allocated, _wasted, _unused, _undo_wasted);
134
135 _allocated = 1;
136 }
137 double wasted_frac = (double)_unused / (double)_allocated;
138 size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
139 if (target_refills == 0) {
140 target_refills = 1;
141 }
142 size_t used = _allocated - _wasted - _unused;
143 // Assumed to have 1 gc worker thread
144 size_t recent_plab_sz = used / target_refills;
145 // Take historical weighted average
146 _filter.sample(recent_plab_sz);
147 // Clip from above and below, and align to object boundary
148 size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
149 new_plab_sz = MIN2(max_size(), new_plab_sz);
150 new_plab_sz = align_object_size(new_plab_sz);
151 // Latch the result
152 if (PrintPLAB) {
153 gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " desired_net_plab_sz = " SIZE_FORMAT ") ", recent_plab_sz, new_plab_sz);
|