/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_STACK_INLINE_HPP
#define SHARE_VM_UTILITIES_STACK_INLINE_HPP

#include "utilities/stack.hpp"

// Element-type-independent state shared by all Stack<E> instantiations:
// the per-segment element count, the maximum number of free segments kept
// in the cache, and the overall element capacity (rounded up to a whole
// number of segments by adjust_max_size()).
StackBase::StackBase(size_t segment_size, size_t max_cache_size,
                     size_t max_size):
  _seg_size(segment_size),
  _max_cache_size(max_cache_size),
  _max_size(adjust_max_size(max_size, segment_size))
{
  assert(_max_size % _seg_size == 0, "not a multiple");
}

// Round max_size up to a multiple of seg_size.  A max_size of 0, or one so
// large that rounding up would overflow, is treated as "effectively
// unlimited": it is clamped to `limit` so the addition below cannot wrap
// around max_uintx.
size_t StackBase::adjust_max_size(size_t max_size, size_t seg_size)
{
  assert(seg_size > 0, "cannot be 0");
  assert(max_size >= seg_size || max_size == 0, "max_size too small");
  const size_t limit = max_uintx - (seg_size - 1);
  if (max_size == 0 || max_size > limit) {
    max_size = limit;
  }
  return (max_size + seg_size - 1) / seg_size * seg_size;
}

template <class E>
Stack<E>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
  StackBase(adjust_segment_size(segment_size), max_cache_size, max_size)
{
  reset(true);  // Start with no segments and an empty segment cache.
}

// Push item onto the stack, starting a fresh segment first when the
// current one is full.  Precondition: the stack is below max_size.
template <class E>
void Stack<E>::push(E item)
{
  assert(!is_full(), "pushing onto a full stack");
  if (_cur_seg_size == _seg_size) {
    push_segment();
  }
  _cur_seg[_cur_seg_size] = item;
  ++_cur_seg_size;
}

// Pop and return the top element; when the last element of the current
// segment is removed, the segment itself is recycled via pop_segment().
// Precondition: the stack is not empty.
template <class E>
E Stack<E>::pop()
{
  assert(!is_empty(), "popping from an empty stack");
  if (_cur_seg_size == 1) {
    // Copy the element out before pop_segment() caches/frees its storage.
    E tmp = _cur_seg[--_cur_seg_size];
    pop_segment();
    return tmp;
  }
  return _cur_seg[--_cur_seg_size];
}

// Release all in-use segments, and also the cached free segments when
// clear_cache is true; the stack is empty afterwards.
template <class E>
void Stack<E>::clear(bool clear_cache)
{
  free_segments(_cur_seg);
  if (clear_cache) free_segments(_cache);
  reset(clear_cache);
}

template <class E>
size_t Stack<E>::default_segment_size()
{
  // Number of elements that fit in 4K bytes minus the size of two pointers
  // (link field and malloc header).
  return (4096 - 2 * sizeof(E*)) / sizeof(E);
}

// When elements are smaller than a pointer, round the requested segment
// size up so the segment's payload is a multiple of the pointer size; this
// keeps the link field appended after the elements aligned for an E*.
template <class E>
size_t Stack<E>::adjust_segment_size(size_t seg_size)
{
  const size_t elem_sz = sizeof(E);
  const size_t ptr_sz = sizeof(E*);
  assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
  if (elem_sz < ptr_sz) {
    return align_size_up(seg_size * elem_sz, ptr_sz) / elem_sz;
  }
  return seg_size;
}

// Byte offset, from the start of a segment, of the link field that chains
// segments together (aligned so an E* can be stored there).
template <class E>
size_t Stack<E>::link_offset() const
{
  return align_size_up(_seg_size * sizeof(E), sizeof(E*));
}

// Total allocation size of one segment: the element array plus the
// trailing link pointer.
template <class E>
size_t Stack<E>::segment_bytes() const
{
  return link_offset() + sizeof(E*);
}

// Address of the link field embedded at the end of segment seg.
template <class E>
E** Stack<E>::link_addr(E* seg) const
{
  return (E**) ((char*)seg + link_offset());
}

// Return the segment that seg links to (the next-older segment, or the
// next entry on the cache list).
template <class E>
E* Stack<E>::get_link(E* seg) const
{
  return *link_addr(seg);
}

// Make new_seg link to old_seg; returns new_seg for convenient chaining.
template <class E>
E* Stack<E>::set_link(E* new_seg, E* old_seg)
{
  *link_addr(new_seg) = old_seg;
  return new_seg;
}

// Segments for plain Stack<E> come from the C heap.
template <class E>
E* Stack<E>::alloc(size_t bytes)
{
  return (E*) NEW_C_HEAP_ARRAY(char, bytes);
}

template <class E>
void Stack<E>::free(E* addr, size_t bytes)
{
  // The byte count is unused here; the parameter exists for interface
  // symmetry with ResourceStack<E>::free(), which does need it.
  FREE_C_HEAP_ARRAY(char, (char*) addr);
}

// Install a new current segment (taken from the cache when possible,
// otherwise freshly allocated), linking the old current segment behind it.
template <class E>
void Stack<E>::push_segment()
{
  assert(_cur_seg_size == _seg_size, "current segment is not full");
  E* next;
  if (_cache_size > 0) {
    // Use a cached segment.
    next = _cache;
    _cache = get_link(_cache);
    --_cache_size;
  } else {
    next = alloc(segment_bytes());
    DEBUG_ONLY(zap_segment(next, true);)
  }
  const bool at_empty_transition = is_empty();
  _cur_seg = set_link(next, _cur_seg);
  _cur_seg_size = 0;
  // _full_seg_size counts elements in every segment except the current
  // one; the segment just displaced contributes only if the stack was not
  // empty (on an empty stack _cur_seg_size == _seg_size is a sentinel, not
  // real elements — see reset()).
  _full_seg_size += at_empty_transition ? 0 : _seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

// Retire the (now empty) current segment — caching it if the cache has
// room, freeing it otherwise — and make the previous segment current.
template <class E>
void Stack<E>::pop_segment()
{
  assert(_cur_seg_size == 0, "current segment is not empty");
  E* const prev = get_link(_cur_seg);
  if (_cache_size < _max_cache_size) {
    // Add the current segment to the cache.
    DEBUG_ONLY(zap_segment(_cur_seg, false);)
    _cache = set_link(_cur_seg, _cache);
    ++_cache_size;
  } else {
    DEBUG_ONLY(zap_segment(_cur_seg, true);)
    free(_cur_seg, segment_bytes());
  }
  const bool at_empty_transition = prev == NULL;
  _cur_seg = prev;
  _cur_seg_size = _seg_size;
  _full_seg_size -= at_empty_transition ? 0 : _seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

// Walk the linked list starting at seg and free every segment on it.
template <class E>
void Stack<E>::free_segments(E* seg)
{
  const size_t bytes = segment_bytes();
  while (seg != NULL) {
    E* const prev = get_link(seg);
    free(seg, bytes);
    seg = prev;
  }
}

// Return the stack to the empty state; also forget the cache when
// reset_cache is true.  Callers must have freed the segments first (see
// clear()) — this only resets the bookkeeping fields.
template <class E>
void Stack<E>::reset(bool reset_cache)
{
  _cur_seg_size = _seg_size; // So push() will alloc a new segment.
  _full_seg_size = 0;
  _cur_seg = NULL;
  if (reset_cache) {
    _cache_size = 0;
    _cache = NULL;
  }
}

#ifdef ASSERT
// Debug-only consistency check of the stack's internal invariants.
// at_empty_transition is true when called mid-way through a segment
// boundary crossing, in which case the size()/is_empty() relation is
// temporarily relaxed.
template <class E>
void Stack<E>::verify(bool at_empty_transition) const
{
  assert(size() <= max_size(), "stack exceeded bounds");
  assert(cache_size() <= max_cache_size(), "cache exceeded bounds");
  assert(_cur_seg_size <= segment_size(), "segment index exceeded bounds");

  assert(_full_seg_size % _seg_size == 0, "not a multiple");
  assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
  assert((_cache == NULL) == (cache_size() == 0), "mismatch");

  if (is_empty()) {
    assert(_cur_seg_size == segment_size(), "sanity");
  }
}

// Fill seg with the recognizable pattern 0xfadfaded so use of stale data
// is easy to spot.  The link field is spared when zap_link_field is false
// (the segment is headed for the cache list, which reuses the link).
// No-op unless ZapStackSegments is set.
template <class E>
void Stack<E>::zap_segment(E* seg, bool zap_link_field) const
{
  if (!ZapStackSegments) return;
  const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
  uint32_t* cur = (uint32_t*)seg;
  const uint32_t* end = cur + zap_bytes / sizeof(uint32_t);
  while (cur < end) {
    *cur++ = 0xfadfaded;
  }
}
#endif

// ResourceStack segments are carved from resource-area storage instead of
// the C heap.
template <class E>
E* ResourceStack<E>::alloc(size_t bytes)
{
  return (E*) resource_allocate_bytes(bytes);
}

template <class E>
void ResourceStack<E>::free(E* addr, size_t bytes)
{
  resource_free_bytes((char*) addr, bytes);
}

// Re-synchronize the iterator with the current top of the underlying
// stack; iteration then proceeds from the top downwards.
template <class E>
void StackIterator<E>::sync()
{
  _full_seg_size = _stack._full_seg_size;
  _cur_seg_size = _stack._cur_seg_size;
  _cur_seg = _stack._cur_seg;
}

// Return the address of the next element (top-down order), hopping to the
// next-older segment once the current one is exhausted.  Precondition:
// the iterator is not empty.
template <class E>
E* StackIterator<E>::next_addr()
{
  assert(!is_empty(), "no items left");
  if (_cur_seg_size == 1) {
    // Last element of this segment is at index 0, i.e. _cur_seg itself.
    E* addr = _cur_seg;
    _cur_seg = _stack.get_link(_cur_seg);
    _cur_seg_size = _stack.segment_size();
    _full_seg_size -= _stack.segment_size();
    return addr;
  }
  return _cur_seg + --_cur_seg_size;
}

#endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP