/*
 * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_STACK_INLINE_HPP
#define SHARE_VM_UTILITIES_STACK_INLINE_HPP

#include "utilities/align.hpp"
#include "utilities/stack.hpp"

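// StackBase records the segment size, the cache limit and the overall size
// limit; max_size is rounded up to a multiple of segment_size by
// adjust_max_size() below.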
template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
                     size_t max_size):
  _seg_size(segment_size),
  _max_cache_size(max_cache_size),
  _max_size(adjust_max_size(max_size, segment_size))
{
  assert(_max_size % _seg_size == 0, "not a multiple");
}

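// Round max_size up to a multiple of seg_size. A max_size of zero means
// "unbounded" and becomes the largest multiple of seg_size that fits in a
// uintx. For example, with seg_size == 16, a max_size of 100 is rounded up
// to 112.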
template <MEMFLAGS F> size_t StackBase<F>::adjust_max_size(size_t max_size, size_t seg_size)
{
  assert(seg_size > 0, "cannot be 0");
  assert(max_size >= seg_size || max_size == 0, "max_size too small");
  const size_t limit = max_uintx - (seg_size - 1);
  if (max_size == 0 || max_size > limit) {
    max_size = limit;
  }
  return (max_size + seg_size - 1) / seg_size * seg_size;
}

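// The requested segment size may be padded by adjust_segment_size() so that
// the link field at the end of each segment is pointer-aligned.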
template <class E, MEMFLAGS F>
Stack<E, F>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
  StackBase<F>(adjust_segment_size(segment_size), max_cache_size, max_size)
{
  reset(true);
}

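// Push item, starting a new segment first if the current one is full.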
template <class E, MEMFLAGS F>
void Stack<E, F>::push(E item)
{
  assert(!is_full(), "pushing onto a full stack");
  if (this->_cur_seg_size == this->_seg_size) {
    push_segment();
  }
  this->_cur_seg[this->_cur_seg_size] = item;
  ++this->_cur_seg_size;
}

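// Pop the top item. Popping the last item in a segment also retires that
// segment via pop_segment().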
template <class E, MEMFLAGS F>
E Stack<E, F>::pop()
{
  assert(!is_empty(), "popping from an empty stack");
  if (this->_cur_seg_size == 1) {
    E tmp = _cur_seg[--this->_cur_seg_size];
    pop_segment();
    return tmp;
  }
  return this->_cur_seg[--this->_cur_seg_size];
}

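// A minimal usage sketch; oop and mtGC merely stand in for whatever element
// type and memory category a client chooses:
//
//   Stack<oop, mtGC> stack;      // default segment, cache and size limits
//   stack.push(obj);
//   while (!stack.is_empty()) {
//     oop top = stack.pop();
//     // process top ...
//   }

// Free the segments in use and, if clear_cache is true, the cached segments
// as well.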
template <class E, MEMFLAGS F>
void Stack<E, F>::clear(bool clear_cache)
{
  free_segments(_cur_seg);
  if (clear_cache) free_segments(_cache);
  reset(clear_cache);
}

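// When elements are smaller than a pointer, grow the element count so the
// elements occupy a whole number of pointer-sized words; the link field that
// follows them then needs no padding. For example, with 2-byte elements and
// 8-byte pointers, a requested seg_size of 5 becomes align_up(10, 8) / 2 == 8.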
template <class E, MEMFLAGS F>
size_t Stack<E, F>::adjust_segment_size(size_t seg_size)
{
  const size_t elem_sz = sizeof(E);
  const size_t ptr_sz = sizeof(E*);
  assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
  if (elem_sz < ptr_sz) {
    return align_up(seg_size * elem_sz, ptr_sz) / elem_sz;
  }
  return seg_size;
}

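// Each segment is laid out as _seg_size elements followed by a link pointer
// to the previous (older) segment; link_offset() is the pointer-aligned byte
// offset of that link field.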
template <class E, MEMFLAGS F>
size_t Stack<E, F>::link_offset() const
{
  return align_up(this->_seg_size * sizeof(E), sizeof(E*));
}

template <class E, MEMFLAGS F>
size_t Stack<E, F>::segment_bytes() const
{
  return link_offset() + sizeof(E*);
}

template <class E, MEMFLAGS F>
E** Stack<E, F>::link_addr(E* seg) const
{
  return (E**) ((char*)seg + link_offset());
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::get_link(E* seg) const
{
  return *link_addr(seg);
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::set_link(E* new_seg, E* old_seg)
{
  *link_addr(new_seg) = old_seg;
  return new_seg;
}

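// Segments live in the C heap; ResourceStack overrides alloc() and free() to
// use the resource area instead.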
template <class E, MEMFLAGS F>
E* Stack<E, F>::alloc(size_t bytes)
{
  return (E*) NEW_C_HEAP_ARRAY(char, bytes, F);
}

template <class E, MEMFLAGS F>
void Stack<E, F>::free(E* addr, size_t bytes)
{
  FREE_C_HEAP_ARRAY(char, (char*) addr);
}

// Stack is used by the GC code, and in some hot paths a lot of the Stack
// code gets inlined. This is generally good, but when too much code has
// been inlined, GCC allows no further inlining. We therefore prevent parts
// of the slow path in Stack from being inlined, so that other code can be.
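// Make a fresh, empty segment the current segment, reusing a cached segment
// when one is available.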
template <class E, MEMFLAGS F>
NOINLINE void Stack<E, F>::push_segment()
{
  assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
  E* next;
  if (this->_cache_size > 0) {
    // Use a cached segment.
    next = _cache;
    _cache = get_link(_cache);
    --this->_cache_size;
  } else {
    next = alloc(segment_bytes());
    DEBUG_ONLY(zap_segment(next, true);)
  }
  const bool at_empty_transition = is_empty();
  this->_cur_seg = set_link(next, _cur_seg);
  this->_cur_seg_size = 0;
  this->_full_seg_size += at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

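// Retire the current (now empty) segment: return it to the cache if there is
// room, otherwise free it, and make the previous segment current again.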
template <class E, MEMFLAGS F>
void Stack<E, F>::pop_segment()
{
  assert(this->_cur_seg_size == 0, "current segment is not empty");
  E* const prev = get_link(_cur_seg);
  if (this->_cache_size < this->_max_cache_size) {
    // Add the current segment to the cache.
    DEBUG_ONLY(zap_segment(_cur_seg, false);)
    _cache = set_link(_cur_seg, _cache);
    ++this->_cache_size;
  } else {
    DEBUG_ONLY(zap_segment(_cur_seg, true);)
    free(_cur_seg, segment_bytes());
  }
  const bool at_empty_transition = prev == NULL;
  this->_cur_seg = prev;
  this->_cur_seg_size = this->_seg_size;
  this->_full_seg_size -= at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

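// Free every segment on the list headed by seg.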
template <class E, MEMFLAGS F>
void Stack<E, F>::free_segments(E* seg)
{
  const size_t bytes = segment_bytes();
  while (seg != NULL) {
    E* const prev = get_link(seg);
    free(seg, bytes);
    seg = prev;
  }
}

template <class E, MEMFLAGS F>
void Stack<E, F>::reset(bool reset_cache)
{
  this->_cur_seg_size = this->_seg_size; // So push() will alloc a new segment.
  this->_full_seg_size = 0;
  _cur_seg = NULL;
  if (reset_cache) {
    this->_cache_size = 0;
    _cache = NULL;
  }
}

#ifdef ASSERT
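// Check the internal invariants. at_empty_transition is true while a segment
// boundary is being crossed, when size() and is_empty() briefly disagree.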
template <class E, MEMFLAGS F>
void Stack<E, F>::verify(bool at_empty_transition) const
{
  assert(size() <= this->max_size(), "stack exceeded bounds");
  assert(this->cache_size() <= this->max_cache_size(), "cache exceeded bounds");
  assert(this->_cur_seg_size <= this->segment_size(), "segment index exceeded bounds");

  assert(this->_full_seg_size % this->_seg_size == 0, "not a multiple");
  assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
  assert((_cache == NULL) == (this->cache_size() == 0), "mismatch");

  if (is_empty()) {
    assert(this->_cur_seg_size == this->segment_size(), "sanity");
  }
}

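// Fill the segment with the 0xfadfaded pattern so stale reads are easy to
// spot; the trailing link field is skipped unless zap_link_field is true.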
template <class E, MEMFLAGS F>
void Stack<E, F>::zap_segment(E* seg, bool zap_link_field) const
{
  if (!ZapStackSegments) return;
  const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
  uint32_t* cur = (uint32_t*)seg;
  const uint32_t* end = cur + zap_bytes / sizeof(uint32_t);
  while (cur < end) {
    *cur++ = 0xfadfaded;
  }
}
#endif

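// ResourceStack allocates its segments from the current resource area rather
// than the C heap.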
template <class E, MEMFLAGS F>
E* ResourceStack<E, F>::alloc(size_t bytes)
{
  return (E*) resource_allocate_bytes(bytes);
}

template <class E, MEMFLAGS F>
void ResourceStack<E, F>::free(E* addr, size_t bytes)
{
  resource_free_bytes((char*) addr, bytes);
}

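// Snapshot the stack's current position so the iterator visits the elements
// present at this point.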
template <class E, MEMFLAGS F>
void StackIterator<E, F>::sync()
{
  _full_seg_size = _stack._full_seg_size;
  _cur_seg_size = _stack._cur_seg_size;
  _cur_seg = _stack._cur_seg;
}

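// Return the address of the next element, walking from the top of the stack
// towards the bottom and following the segment links as each segment is
// exhausted.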
template <class E, MEMFLAGS F>
E* StackIterator<E, F>::next_addr()
{
  assert(!is_empty(), "no items left");
  if (_cur_seg_size == 1) {
    E* addr = _cur_seg;
    _cur_seg = _stack.get_link(_cur_seg);
    _cur_seg_size = _stack.segment_size();
    _full_seg_size -= _stack.segment_size();
    return addr;
  }
  return _cur_seg + --_cur_seg_size;
}

#endif // SHARE_VM_UTILITIES_STACK_INLINE_HPP