/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_PLAB_HPP
#define SHARE_VM_GC_SHARED_PLAB_HPP

#include "gc/shared/gcUtil.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/globalDefinitions.hpp"

// Forward declarations.
class PLABStats;

// A per-thread allocation buffer used during GC.
37 class PLAB: public CHeapObj<mtGC> { 38 protected: 39 char head[32]; 40 size_t _word_sz; // In HeapWord units 41 HeapWord* _bottom; 42 HeapWord* _top; 43 HeapWord* _end; // Last allocatable address + 1 44 HeapWord* _hard_end; // _end + AlignmentReserve 45 // In support of ergonomic sizing of PLAB's 46 size_t _allocated; // in HeapWord units 47 size_t _wasted; // in HeapWord units 48 size_t _undo_wasted; 49 char tail[32]; 50 static size_t AlignmentReserve; 51 52 // Force future allocations to fail and queries for contains() 53 // to return false. Returns the amount of unused space in this PLAB. 54 size_t invalidate() { 55 _end = _hard_end; 56 size_t remaining = pointer_delta(_end, _top); // Calculate remaining space. 57 _top = _end; // Force future allocations to fail. 58 _bottom = _end; // Force future contains() queries to return false. 59 return remaining; 60 } 61 62 // Fill in remaining space with a dummy object and invalidate the PLAB. Returns 63 // the amount of remaining space. 64 size_t retire_internal(); 65 66 void add_undo_waste(HeapWord* obj, size_t word_sz); 67 68 // Undo the last allocation in the buffer, which is required to be of the 69 // "obj" of the given "word_sz". 70 void undo_last_allocation(HeapWord* obj, size_t word_sz); 71 72 public: 73 // Initializes the buffer to be empty, but with the given "word_sz". 74 // Must get initialized with "set_buf" for an allocation to succeed. 75 PLAB(size_t word_sz); 76 virtual ~PLAB() {} 77 78 // Minimum PLAB size. 79 static size_t min_size(); 80 // Maximum PLAB size. 81 static size_t max_size(); 82 83 // If an allocation of the given "word_sz" can be satisfied within the 84 // buffer, do the allocation, returning a pointer to the start of the 85 // allocated block. If the allocation request cannot be satisfied, 86 // return NULL. 
87 HeapWord* allocate(size_t word_sz) { 88 HeapWord* res = _top; 89 if (pointer_delta(_end, _top) >= word_sz) { 90 _top = _top + word_sz; 91 return res; 92 } else { 93 return NULL; 94 } 95 } 96 97 // Allocate the object aligned to "alignment_in_bytes". 98 HeapWord* allocate_aligned(size_t word_sz, unsigned short alignment_in_bytes); 99 100 // Undo any allocation in the buffer, which is required to be of the 101 // "obj" of the given "word_sz". 102 void undo_allocation(HeapWord* obj, size_t word_sz); 103 104 // The total (word) size of the buffer, including both allocated and 105 // unallocated space. 106 size_t word_sz() { return _word_sz; } 107 108 size_t waste() { return _wasted; } 109 size_t undo_waste() { return _undo_wasted; } 110 111 // Should only be done if we are about to reset with a new buffer of the 112 // given size. 113 void set_word_size(size_t new_word_sz) { 114 assert(new_word_sz > AlignmentReserve, "Too small"); 115 _word_sz = new_word_sz; 116 } 117 118 // The number of words of unallocated space remaining in the buffer. 119 size_t words_remaining() { 120 assert(_end >= _top, "Negative buffer"); 121 return pointer_delta(_end, _top, HeapWordSize); 122 } 123 124 bool contains(void* addr) { 125 return (void*)_bottom <= addr && addr < (void*)_hard_end; 126 } 127 128 // Sets the space of the buffer to be [buf, space+word_sz()). 129 virtual void set_buf(HeapWord* buf) { 130 _bottom = buf; 131 _top = _bottom; 132 _hard_end = _bottom + word_sz(); 133 _end = _hard_end - AlignmentReserve; 134 assert(_end >= _top, "Negative buffer"); 135 // In support of ergonomic sizing 136 _allocated += word_sz(); 137 } 138 139 // Flush allocation statistics into the given PLABStats supporting ergonomic 140 // sizing of PLAB's and retire the current buffer. To be called at the end of 141 // GC. 142 virtual void flush_and_retire_stats(PLABStats* stats); 143 144 // Fills in the unallocated portion of the buffer with a garbage object and updates 145 // statistics. 
To be called during GC. 146 virtual void retire(); 147 148 void print() PRODUCT_RETURN; 149 }; 150 151 // PLAB book-keeping. 152 class PLABStats : public CHeapObj<mtGC> { 153 protected: 154 size_t _allocated; // Total allocated 155 size_t _wasted; // of which wasted (internal fragmentation) 156 size_t _undo_wasted; // of which wasted on undo (is not used for calculation of PLAB size) 157 size_t _unused; // Unused in last buffer 158 size_t _desired_net_plab_sz;// Output of filter (below), suitably trimmed and quantized 159 AdaptiveWeightedAverage 160 _filter; // Integrator with decay 161 162 virtual void reset() { 163 _allocated = 0; 164 _wasted = 0; 165 _undo_wasted = 0; 166 _unused = 0; 167 } 168 public: 169 PLABStats(size_t desired_net_plab_sz_, unsigned wt) : 170 _allocated(0), 171 _wasted(0), 172 _undo_wasted(0), 173 _unused(0), 174 _desired_net_plab_sz(desired_net_plab_sz_), 175 _filter(wt) 176 { } 177 178 virtual ~PLABStats() { } 179 180 static const size_t min_size() { 181 return PLAB::min_size(); 182 } 183 184 static const size_t max_size() { 185 return PLAB::max_size(); 186 } 187 188 // Calculates plab size for current number of gc worker threads. 189 size_t desired_plab_sz(uint no_of_gc_workers); 190 191 // Updates the current desired PLAB size. Computes the new desired PLAB size with one gc worker thread, 192 // updates _desired_plab_sz and clears sensor accumulators. 193 virtual void adjust_desired_plab_sz(); 194 195 void add_allocated(size_t v) { 196 Atomic::add_ptr(v, &_allocated); 197 } 198 199 void add_unused(size_t v) { 200 Atomic::add_ptr(v, &_unused); 201 } 202 203 void add_wasted(size_t v) { 204 Atomic::add_ptr(v, &_wasted); 205 } 206 207 void add_undo_wasted(size_t v) { 208 Atomic::add_ptr(v, &_undo_wasted); 209 } 210 }; 211 212 #endif // SHARE_VM_GC_SHARED_PLAB_HPP