/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 */ 23 24 #include "precompiled.hpp" 25 #include "gc/z/zErrno.hpp" 26 #include "gc/z/zGlobals.hpp" 27 #include "gc/z/zLock.inline.hpp" 28 #include "gc/z/zMarkStack.inline.hpp" 29 #include "logging/log.hpp" 30 #include "runtime/atomic.hpp" 31 #include "utilities/debug.hpp" 32 33 #include <sys/mman.h> 34 #include <sys/types.h> 35 36 ZMarkStackSpace::ZMarkStackSpace() : 37 _expand_lock(), 38 _top(0), 39 _end(0) { 40 assert(ZMarkStacksMax >= ZMarkStackSpaceExpandSize, "ZMarkStacksMax too small"); 41 assert(ZMarkStacksMax <= ZMarkStackSpaceSize, "ZMarkStacksMax too large"); 42 43 // Reserve address space 44 const void* res = mmap((void*)ZMarkStackSpaceStart, ZMarkStackSpaceSize, 45 PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); 46 if (res != (void*)ZMarkStackSpaceStart) { 47 log_error(gc, marking)("Failed to reserve address space for marking stacks"); 48 return; 49 } 50 51 // Successfully initialized 52 _top = _end = ZMarkStackSpaceStart; 53 } 54 55 bool ZMarkStackSpace::is_initialized() const { 56 return _top != 0; 57 } 58 59 bool ZMarkStackSpace::expand() { 60 const size_t max = ZMarkStackSpaceStart + ZMarkStacksMax; 61 if (_end + ZMarkStackSpaceExpandSize > max) { 62 // Expansion limit reached 63 return false; 64 } 65 66 void* const res = mmap((void*)_end, ZMarkStackSpaceExpandSize, 67 PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0); 68 if (res == MAP_FAILED) { 69 ZErrno err; 70 log_error(gc, marking)("Failed to map memory for marking stacks (%s)", err.to_string()); 71 return false; 72 } 73 74 return true; 75 } 76 77 uintptr_t ZMarkStackSpace::alloc_space(size_t size) { 78 uintptr_t top = _top; 79 80 for (;;) { 81 const uintptr_t new_top = top + size; 82 if (new_top > _end) { 83 // Not enough space left 84 return 0; 85 } 86 87 const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, top); 88 if (prev_top == top) { 89 // Success 90 return top; 91 } 92 93 // Retry 94 top = prev_top; 95 } 96 } 97 98 uintptr_t 
ZMarkStackSpace::expand_and_alloc_space(size_t size) { 99 ZLocker locker(&_expand_lock); 100 101 // Retry allocation before expanding 102 uintptr_t addr = alloc_space(size); 103 if (addr != 0) { 104 return addr; 105 } 106 107 // Expand stack space 108 if (!expand()) { 109 // We currently can't handle the situation where we 110 // are running out of mark stack space. 111 fatal("Mark stack overflow (allocated " SIZE_FORMAT "M, size " SIZE_FORMAT "M, max " SIZE_FORMAT "M)," 112 " use -XX:ZMarkStacksMax=? to increase this limit", 113 (_end - ZMarkStackSpaceStart) / M, size / M, ZMarkStacksMax / M); 114 return 0; 115 } 116 117 log_debug(gc, marking)("Expanding mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M", 118 (_end - ZMarkStackSpaceStart) / M, 119 (_end - ZMarkStackSpaceStart + ZMarkStackSpaceExpandSize) / M); 120 121 // Increment top before end to make sure another 122 // thread can't steal out newly expanded space. 123 addr = Atomic::add(size, &_top) - size; 124 _end += ZMarkStackSpaceExpandSize; 125 126 return addr; 127 } 128 129 uintptr_t ZMarkStackSpace::alloc(size_t size) { 130 const uintptr_t addr = alloc_space(size); 131 if (addr != 0) { 132 return addr; 133 } 134 135 return expand_and_alloc_space(size); 136 } 137 138 ZMarkStackAllocator::ZMarkStackAllocator() : 139 _freelist(), 140 _space() { 141 guarantee(sizeof(ZMarkStack) == ZMarkStackSize, "Size mismatch"); 142 guarantee(sizeof(ZMarkStackMagazine) <= ZMarkStackSize, "Size mismatch"); 143 144 // Prime free list to avoid an immediate space 145 // expansion when marking starts. 
146 if (_space.is_initialized()) { 147 prime_freelist(); 148 } 149 } 150 151 bool ZMarkStackAllocator::is_initialized() const { 152 return _space.is_initialized(); 153 } 154 155 void ZMarkStackAllocator::prime_freelist() { 156 for (size_t size = 0; size < ZMarkStackSpaceExpandSize; size += ZMarkStackMagazineSize) { 157 const uintptr_t addr = _space.alloc(ZMarkStackMagazineSize); 158 ZMarkStackMagazine* const magazine = create_magazine_from_space(addr, ZMarkStackMagazineSize); 159 free_magazine(magazine); 160 } 161 } 162 163 ZMarkStackMagazine* ZMarkStackAllocator::create_magazine_from_space(uintptr_t addr, size_t size) { 164 assert(is_aligned(size, ZMarkStackSize), "Invalid size"); 165 166 // Use first stack as magazine 167 ZMarkStackMagazine* const magazine = new ((void*)addr) ZMarkStackMagazine(); 168 for (size_t i = ZMarkStackSize; i < size; i += ZMarkStackSize) { 169 ZMarkStack* const stack = new ((void*)(addr + i)) ZMarkStack(); 170 const bool success = magazine->push(stack); 171 assert(success, "Magazine should never get full"); 172 } 173 174 return magazine; 175 } 176 177 ZMarkStackMagazine* ZMarkStackAllocator::alloc_magazine() { 178 // Try allocating from the free list first 179 ZMarkStackMagazine* const magazine = _freelist.pop_atomic(); 180 if (magazine != NULL) { 181 return magazine; 182 } 183 184 // Allocate new magazine 185 const uintptr_t addr = _space.alloc(ZMarkStackMagazineSize); 186 if (addr == 0) { 187 return NULL; 188 } 189 190 return create_magazine_from_space(addr, ZMarkStackMagazineSize); 191 } 192 193 void ZMarkStackAllocator::free_magazine(ZMarkStackMagazine* magazine) { 194 _freelist.push_atomic(magazine); 195 } 196 197 ZMarkStripe::ZMarkStripe() : 198 _published(), 199 _overflowed() {} 200 201 ZMarkStripeSet::ZMarkStripeSet() : 202 _nstripes(0), 203 _nstripes_mask(0), 204 _stripes() {} 205 206 void ZMarkStripeSet::set_nstripes(size_t nstripes) { 207 assert(is_power_of_2(nstripes), "Must be a power of two"); 208 
assert(is_power_of_2(ZMarkStripesMax), "Must be a power of two"); 209 assert(nstripes >= 1, "Invalid number of stripes"); 210 assert(nstripes <= ZMarkStripesMax, "Invalid number of stripes"); 211 212 _nstripes = nstripes; 213 _nstripes_mask = nstripes - 1; 214 215 log_debug(gc, marking)("Using " SIZE_FORMAT " mark stripes", _nstripes); 216 } 217 218 bool ZMarkStripeSet::is_empty() const { 219 for (size_t i = 0; i < _nstripes; i++) { 220 if (!_stripes[i].is_empty()) { 221 return false; 222 } 223 } 224 225 return true; 226 } 227 228 ZMarkStripe* ZMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) { 229 const size_t spillover_limit = (nworkers / _nstripes) * _nstripes; 230 size_t index; 231 232 if (worker_id < spillover_limit) { 233 // Not a spillover worker, use natural stripe 234 index = worker_id & _nstripes_mask; 235 } else { 236 // Distribute spillover workers evenly across stripes 237 const size_t spillover_nworkers = nworkers - spillover_limit; 238 const size_t spillover_worker_id = worker_id - spillover_limit; 239 const double spillover_chunk = (double)_nstripes / (double)spillover_nworkers; 240 index = spillover_worker_id * spillover_chunk; 241 } 242 243 assert(index < _nstripes, "Invalid index"); 244 return &_stripes[index]; 245 } 246 247 ZMarkThreadLocalStacks::ZMarkThreadLocalStacks() : 248 _magazine(NULL) { 249 for (size_t i = 0; i < ZMarkStripesMax; i++) { 250 _stacks[i] = NULL; 251 } 252 } 253 254 bool ZMarkThreadLocalStacks::is_empty(const ZMarkStripeSet* stripes) const { 255 for (size_t i = 0; i < stripes->nstripes(); i++) { 256 ZMarkStack* const stack = _stacks[i]; 257 if (stack != NULL) { 258 return false; 259 } 260 } 261 262 return true; 263 } 264 265 ZMarkStack* ZMarkThreadLocalStacks::allocate_stack(ZMarkStackAllocator* allocator) { 266 if (_magazine == NULL) { 267 // Allocate new magazine 268 _magazine = allocator->alloc_magazine(); 269 if (_magazine == NULL) { 270 return NULL; 271 } 272 } 273 274 ZMarkStack* stack = NULL; 275 276 
if (!_magazine->pop(stack)) { 277 // Magazine is empty, convert magazine into a new stack 278 _magazine->~ZMarkStackMagazine(); 279 stack = new ((void*)_magazine) ZMarkStack(); 280 _magazine = NULL; 281 } 282 283 return stack; 284 } 285 286 void ZMarkThreadLocalStacks::free_stack(ZMarkStackAllocator* allocator, ZMarkStack* stack) { 287 for (;;) { 288 if (_magazine == NULL) { 289 // Convert stack into a new magazine 290 stack->~ZMarkStack(); 291 _magazine = new ((void*)stack) ZMarkStackMagazine(); 292 return; 293 } 294 295 if (_magazine->push(stack)) { 296 // Success 297 return; 298 } 299 300 // Free and uninstall full magazine 301 allocator->free_magazine(_magazine); 302 _magazine = NULL; 303 } 304 } 305 306 bool ZMarkThreadLocalStacks::push_slow(ZMarkStackAllocator* allocator, 307 ZMarkStripe* stripe, 308 ZMarkStack** stackp, 309 ZMarkStackEntry entry, 310 bool publish) { 311 ZMarkStack* stack = *stackp; 312 313 for (;;) { 314 if (stack == NULL) { 315 // Allocate and install new stack 316 *stackp = stack = allocate_stack(allocator); 317 if (stack == NULL) { 318 // Out of mark stack memory 319 return false; 320 } 321 } 322 323 if (stack->push(entry)) { 324 // Success 325 return true; 326 } 327 328 // Publish/Overflow and uninstall stack 329 stripe->publish_stack(stack, publish); 330 *stackp = stack = NULL; 331 } 332 } 333 334 bool ZMarkThreadLocalStacks::pop_slow(ZMarkStackAllocator* allocator, 335 ZMarkStripe* stripe, 336 ZMarkStack** stackp, 337 ZMarkStackEntry& entry) { 338 ZMarkStack* stack = *stackp; 339 340 for (;;) { 341 if (stack == NULL) { 342 // Try steal and install stack 343 *stackp = stack = stripe->steal_stack(); 344 if (stack == NULL) { 345 // Nothing to steal 346 return false; 347 } 348 } 349 350 if (stack->pop(entry)) { 351 // Success 352 return true; 353 } 354 355 // Free and uninstall stack 356 free_stack(allocator, stack); 357 *stackp = stack = NULL; 358 } 359 } 360 361 bool ZMarkThreadLocalStacks::flush(ZMarkStackAllocator* allocator, 
ZMarkStripeSet* stripes) { 362 bool flushed = false; 363 364 // Flush all stacks 365 for (size_t i = 0; i < stripes->nstripes(); i++) { 366 ZMarkStripe* const stripe = stripes->stripe_at(i); 367 ZMarkStack** const stackp = &_stacks[i]; 368 ZMarkStack* const stack = *stackp; 369 if (stack == NULL) { 370 continue; 371 } 372 373 // Free/Publish and uninstall stack 374 if (stack->is_empty()) { 375 free_stack(allocator, stack); 376 } else { 377 stripe->publish_stack(stack); 378 flushed = true; 379 } 380 *stackp = NULL; 381 } 382 383 return flushed; 384 } 385 386 void ZMarkThreadLocalStacks::free(ZMarkStackAllocator* allocator) { 387 // Free and uninstall magazine 388 if (_magazine != NULL) { 389 allocator->free_magazine(_magazine); 390 _magazine = NULL; 391 } 392 }