7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
#include "precompiled.hpp"
#include "gc_implementation/shared/plab.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/threadLocalAllocBuffer.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
30
31 size_t PLAB::min_size() {
32 // Make sure that we return something that is larger than AlignmentReserve
33 return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
34 }
35
// Upper bound on PLAB size: capped at the same maximum as TLABs.
size_t PLAB::max_size() {
  return ThreadLocalAllocBuffer::max_size();
}
39
// Construct a PLAB with the given desired size in HeapWords. No backing
// memory is attached here; all buffer pointers start out NULL and the
// allocation/waste accumulators start at zero.
PLAB::PLAB(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0)
{
  // ArrayOopDesc::header_size depends on command line initialization,
  // so the static AlignmentReserve is (re)computed here rather than at
  // static-initialization time.
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
  // min_size() reads AlignmentReserve, so this check must follow the
  // assignment above.
  assert(min_size() > AlignmentReserve,
         err_msg("Minimum PLAB size " SIZE_FORMAT" must be larger than alignment reserve " SIZE_FORMAT" "
                 "to be able to contain objects", min_size(), AlignmentReserve));
}
50
// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
// Defined here; its value is computed in the PLAB constructor.
size_t PLAB::AlignmentReserve;
57
58 void PLAB::flush_and_retire_stats(PLABStats* stats) {
59 // Retire the last allocation buffer.
60 size_t unused = retire_internal();
61
62 // Now flush the statistics.
63 stats->add_allocated(_allocated);
64 stats->add_wasted(_wasted);
65 stats->add_unused(unused);
66
67 // Since we have flushed the stats we need to clear the _allocated and _wasted
68 // fields in case somebody retains an instance of this over GCs. Not doing so
69 // will artifically inflate the values in the statistics.
70 _allocated = 0;
71 _wasted = 0;
72 }
73
// Retire the buffer; any space left unfilled is accounted as waste.
void PLAB::retire() {
  _wasted += retire_internal();
}
77
78 size_t PLAB::retire_internal() {
79 size_t result = 0;
80 if (_top < _hard_end) {
81 CollectedHeap::fill_with_object(_top, _hard_end);
82 result += invalidate();
83 }
84 return result;
85 }
86
87 // Compute desired plab size and latch result for later
88 // use. This should be called once at the end of parallel
89 // scavenge; it clears the sensor accumulators.
90 void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
91 assert(ResizePLAB, "Not set");
92
93 assert(is_object_aligned(max_size()) && min_size() <= max_size(),
94 "PLAB clipping computation may be incorrect");
95
96 if (_allocated == 0) {
97 assert(_unused == 0,
98 err_msg("Inconsistency in PLAB stats: "
99 "_allocated: "SIZE_FORMAT", "
100 "_wasted: "SIZE_FORMAT", "
101 "_unused: "SIZE_FORMAT,
102 _allocated, _wasted, _unused));
103
104 _allocated = 1;
105 }
106 double wasted_frac = (double)_unused / (double)_allocated;
107 size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
108 if (target_refills == 0) {
109 target_refills = 1;
110 }
111 size_t used = _allocated - _wasted - _unused;
112 size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
113 // Take historical weighted average
114 _filter.sample(recent_plab_sz);
115 // Clip from above and below, and align to object boundary
116 size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
117 new_plab_sz = MIN2(max_size(), new_plab_sz);
118 new_plab_sz = align_object_size(new_plab_sz);
119 // Latch the result
120 if (PrintPLAB) {
121 gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
122 }
|
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc_implementation/shared/plab.hpp"
27 #include "gc_interface/collectedHeap.hpp"
28 #include "memory/threadLocalAllocBuffer.hpp"
29 #include "oops/arrayOop.hpp"
30 #include "oops/oop.inline.hpp"
31
32 size_t PLAB::min_size() {
33 // Make sure that we return something that is larger than AlignmentReserve
34 return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
35 }
36
// Upper bound on PLAB size: capped at the same maximum as TLABs.
size_t PLAB::max_size() {
  return ThreadLocalAllocBuffer::max_size();
}
40
// Construct a PLAB with the given desired size in HeapWords. No backing
// memory is attached here; all buffer pointers start out NULL and the
// allocation/waste accumulators start at zero.
PLAB::PLAB(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
{
  // ArrayOopDesc::header_size depends on command line initialization,
  // so the static AlignmentReserve is (re)computed here rather than at
  // static-initialization time.
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
  // min_size() reads AlignmentReserve, so this check must follow the
  // assignment above.
  assert(min_size() > AlignmentReserve,
         err_msg("Minimum PLAB size " SIZE_FORMAT" must be larger than alignment reserve " SIZE_FORMAT" "
                 "to be able to contain objects", min_size(), AlignmentReserve));
}
51
// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
// Defined here; its value is computed in the PLAB constructor.
size_t PLAB::AlignmentReserve;
58
59 void PLAB::flush_and_retire_stats(PLABStats* stats) {
60 // Retire the last allocation buffer.
61 size_t unused = retire_internal();
62
63 // Now flush the statistics.
64 stats->add_allocated(_allocated);
65 stats->add_wasted(_wasted);
66 stats->add_undo_wasted(_undo_wasted);
67 stats->add_unused(unused);
68
69 // Since we have flushed the stats we need to clear the _allocated and _wasted
70 // fields in case somebody retains an instance of this over GCs. Not doing so
71 // will artifically inflate the values in the statistics.
72 _allocated = 0;
73 _wasted = 0;
74 _undo_wasted = 0;
75 }
76
// Retire the buffer; any space left unfilled is accounted as waste.
void PLAB::retire() {
  _wasted += retire_internal();
}
80
81 size_t PLAB::retire_internal() {
82 size_t result = 0;
83 if (_top < _hard_end) {
84 CollectedHeap::fill_with_object(_top, _hard_end);
85 result += invalidate();
86 }
87 return result;
88 }
89
// An allocation outside this buffer cannot be rewound; overwrite it with
// a filler object (keeping the heap parseable) and account its size as
// undo waste.
void PLAB::add_undo_waste(HeapWord* obj, size_t word_sz) {
  CollectedHeap::fill_with_object(obj, word_sz);
  _undo_wasted += word_sz;
}
94
// Rewind the most recent allocation from this buffer by moving _top back.
// The caller must guarantee that [obj, obj + word_sz) is exactly the last
// allocation made from this PLAB; the asserts check that precondition.
void PLAB::undo_last_allocation(HeapWord* obj, size_t word_sz) {
  assert(pointer_delta(_top, _bottom) >= word_sz, "Bad undo");
  assert(pointer_delta(_top, obj) == word_sz, "Bad undo");
  _top = obj;
}
100
101 void PLAB::undo_allocation(HeapWord* obj, size_t word_sz) {
102 // Is the alloc in the current alloc buffer?
103 if (contains(obj)) {
104 assert(contains(obj + word_sz - 1),
105 "should contain whole object");
106 undo_last_allocation(obj, word_sz);
107 } else {
108 add_undo_waste(obj, word_sz);
109 }
110 }
111
112 // Compute desired plab size and latch result for later
113 // use. This should be called once at the end of parallel
114 // scavenge; it clears the sensor accumulators.
115 void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
116 assert(ResizePLAB, "Not set");
117
118 assert(is_object_aligned(max_size()) && min_size() <= max_size(),
119 "PLAB clipping computation may be incorrect");
120
121 if (_allocated == 0) {
122 assert(_unused == 0,
123 err_msg("Inconsistency in PLAB stats: "
124 "_allocated: "SIZE_FORMAT", "
125 "_wasted: "SIZE_FORMAT", "
126 "_unused: "SIZE_FORMAT", "
127 "_undo_wasted: "SIZE_FORMAT,
128 _allocated, _wasted, _unused, _undo_wasted));
129
130 _allocated = 1;
131 }
132 double wasted_frac = (double)_unused / (double)_allocated;
133 size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
134 if (target_refills == 0) {
135 target_refills = 1;
136 }
137 size_t used = _allocated - _wasted - _unused;
138 size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
139 // Take historical weighted average
140 _filter.sample(recent_plab_sz);
141 // Clip from above and below, and align to object boundary
142 size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
143 new_plab_sz = MIN2(max_size(), new_plab_sz);
144 new_plab_sz = align_object_size(new_plab_sz);
145 // Latch the result
146 if (PrintPLAB) {
147 gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
148 }
|