/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"

// Thread-Local Edens support

// static member initialization
size_t           ThreadLocalAllocBuffer::_max_size = 0;
int              ThreadLocalAllocBuffer::_reserve_for_allocation_prefetch = 0;
unsigned         ThreadLocalAllocBuffer::_target_refills = 0;
GlobalTLABStats* ThreadLocalAllocBuffer::_global_stats = NULL;

void ThreadLocalAllocBuffer::clear_before_allocation() {
  _slow_refill_waste += (unsigned)remaining();
  make_parsable(true);   // also retire the TLAB
}

void ThreadLocalAllocBuffer::accumulate_statistics_before_gc() {
  global_stats()->initialize();

  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
    thread->tlab().accumulate_statistics();
    thread->tlab().initialize_statistics();
  }

  // Publish new stats if some allocation occurred.
  if (global_stats()->allocation() != 0) {
    global_stats()->publish();
    global_stats()->print();
  }
}

void ThreadLocalAllocBuffer::accumulate_statistics() {
  Thread* thread = myThread();
  size_t capacity = Universe::heap()->tlab_capacity(thread);
  size_t used     = Universe::heap()->tlab_used(thread);

  _gc_waste += (unsigned)remaining();
  size_t total_allocated = thread->allocated_bytes();
  size_t allocated_since_last_gc = total_allocated - _allocated_before_last_gc;
  _allocated_before_last_gc = total_allocated;

  print_stats("gc");

  if (_number_of_refills > 0) {
    // Update allocation history if a reasonable amount of eden was allocated.
    bool update_allocation_history = used > 0.5 * capacity;

    if (update_allocation_history) {
      // Average the fraction of eden allocated in a tlab by this
      // thread for use in the next resize operation.
      // _gc_waste is not subtracted because it's included in
      // "used".
      // The result can be larger than 1.0 due to direct to old allocations.
      // These allocations should ideally not be counted but since it is not possible
      // to filter them out here we just cap the fraction to be at most 1.0.
      double alloc_frac = MIN2(1.0, (double) allocated_since_last_gc / used);
      _allocation_fraction.sample(alloc_frac);
    }
    global_stats()->update_allocating_threads();
    global_stats()->update_number_of_refills(_number_of_refills);
    global_stats()->update_allocation(_number_of_refills * desired_size());
    global_stats()->update_gc_waste(_gc_waste);
    global_stats()->update_slow_refill_waste(_slow_refill_waste);
    global_stats()->update_fast_refill_waste(_fast_refill_waste);
  } else {
    assert(_number_of_refills == 0 && _fast_refill_waste == 0 &&
           _slow_refill_waste == 0 && _gc_waste          == 0,
           "tlab stats == 0");
  }
  global_stats()->update_slow_allocations(_slow_allocations);
}
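
// A worked example of the capped fraction above (hypothetical numbers):
// suppose eden's tlab capacity is 512M and tlab_used reports 400M at GC
// time (400 > 0.5 * 512, so the history is updated). If this thread
// allocated 300M since the last GC, alloc_frac is 300.0 / 400 = 0.75.
// Had it also allocated 200M directly into the old generation, the raw
// ratio would be 500.0 / 400 = 1.25, which is why the value is clamped
// to 1.0 before being fed to the exponential average.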

// Fills the current tlab with a dummy filler array to create
// an illusion of a contiguous Eden and optionally retires the tlab.
// Waste accounting should be done in caller as appropriate; see,
// for example, clear_before_allocation().
void ThreadLocalAllocBuffer::make_parsable(bool retire, bool zap) {
  if (end() != NULL) {
    invariants();

    if (retire) {
      myThread()->incr_allocated_bytes(used_bytes());
    }

    CollectedHeap::fill_with_object(top(), hard_end(), retire && zap);

    if (retire || ZeroTLAB) {  // "Reset" the TLAB
      set_start(NULL);
      set_top(NULL);
      set_pf_top(NULL);
      set_end(NULL);
      set_actual_end(NULL);
      set_slow_path_end(NULL);
    }
  }
  assert(!(retire || ZeroTLAB) ||
         (start() == NULL && end() == NULL && top() == NULL &&
          actual_end() == NULL && slow_path_end() == NULL),
         "TLAB must be reset");
}

void ThreadLocalAllocBuffer::resize_all_tlabs() {
  if (ResizeTLAB) {
    for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
      thread->tlab().resize();
    }
  }
}

void ThreadLocalAllocBuffer::resize() {
  // Compute the next tlab size using expected allocation amount
  assert(ResizeTLAB, "Should not call this otherwise");
  size_t alloc = (size_t)(_allocation_fraction.average() *
                          (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize));
  size_t new_size = alloc / _target_refills;

  new_size = MIN2(MAX2(new_size, min_size()), max_size());

  size_t aligned_new_size = align_object_size(new_size);

  log_trace(gc, tlab)("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
                      " refills %d alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT,
                      p2i(myThread()), myThread()->osthread()->thread_id(),
                      _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);

  set_desired_size(aligned_new_size);
  set_refill_waste_limit(initial_refill_waste_limit());
}
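
// A numeric sketch of the resize computation above (hypothetical numbers):
// with tlab_capacity() == 512M bytes (64M words at HeapWordSize == 8),
// _allocation_fraction.average() == 0.05 and _target_refills == 50:
//   alloc    = 0.05 * 64M words  = ~3.2M words expected per GC interval
//   new_size = 3.2M / 50 refills = ~64K words (512KB) per TLAB,
// which is then clamped to [min_size(), max_size()] and object-aligned.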

void ThreadLocalAllocBuffer::initialize_statistics() {
  _number_of_refills = 0;
  _fast_refill_waste = 0;
  _slow_refill_waste = 0;
  _gc_waste          = 0;
  _slow_allocations  = 0;
}

void ThreadLocalAllocBuffer::fill(HeapWord* start,
                                  HeapWord* top,
                                  size_t    new_size) {
  _number_of_refills++;
  print_stats("fill");
  assert(top <= start + new_size - alignment_reserve(), "size too small");

  // Carry the remaining bytes-until-sample over to the next tlab, but only
  // if this is not our first actual refill (on the first refill there is no
  // previous tlab to carry over from).
  size_t old_bytes_until_sample = 0;
  if (_number_of_refills > 1) {
    old_bytes_until_sample = bytes_until_sample();
  }

  initialize(start, top, start + new_size - alignment_reserve());

  if (old_bytes_until_sample > 0) {
    set_bytes_until_sample(old_bytes_until_sample);
    set_sample_end();
  }

  // Reset amount of internal fragmentation
  set_refill_waste_limit(initial_refill_waste_limit());
}

void ThreadLocalAllocBuffer::initialize(HeapWord* start,
                                        HeapWord* top,
                                        HeapWord* end) {
  set_start(start);
  set_top(top);
  set_pf_top(top);
  set_end(end);
  set_actual_end(end);
  set_slow_path_end(end);
  invariants();
  _bytes_until_sample = 0;
}

void ThreadLocalAllocBuffer::initialize() {
  initialize(NULL,                    // start
             NULL,                    // top
             NULL);                   // end

  set_desired_size(initial_desired_size());

  // Following check is needed because at startup the main (primordial)
  // thread is initialized before the heap is.  The initialization for
  // this thread is redone in startup_initialization below.
  if (Universe::heap() != NULL) {
    size_t capacity   = Universe::heap()->tlab_capacity(myThread()) / HeapWordSize;
    double alloc_frac = desired_size() * target_refills() / (double) capacity;
    _allocation_fraction.sample(alloc_frac);
  }

  set_refill_waste_limit(initial_refill_waste_limit());

  initialize_statistics();
}

void ThreadLocalAllocBuffer::startup_initialization() {

  // Assuming each thread's active tlab is, on average,
  // 1/2 full at a GC
  _target_refills = 100 / (2 * TLABWasteTargetPercent);
  _target_refills = MAX2(_target_refills, (unsigned)1U);

  _global_stats = new GlobalTLABStats();

#ifdef COMPILER2
  // If the C2 compiler is present, extra space is needed at the end of
  // TLABs, otherwise prefetching instructions generated by the C2
  // compiler will fault (due to accessing memory outside of heap).
  // The amount of space is the max of the number of lines to
  // prefetch for array and for instance allocations. (Extra space must be
  // reserved to accommodate both types of allocations.)
  //
  // Only SPARC-specific BIS instructions are known to fault. (Those
  // instructions are generated if AllocatePrefetchStyle==3 and
  // AllocatePrefetchInstr==1). To be on the safe side, however,
  // extra space is reserved for all combinations of
  // AllocatePrefetchStyle and AllocatePrefetchInstr.
  //
  // If the C2 compiler is not present, no space is reserved.

  // +1 for rounding up to next cache line, +1 to be safe
  if (is_server_compilation_mode_vm()) {
    int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
    _reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) /
                                       (int)HeapWordSize;
  }
#endif

  // During jvm startup, the main (primordial) thread is initialized
  // before the heap is initialized.  So reinitialize it now.
  guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
  Thread::current()->tlab().initialize();

  log_develop_trace(gc, tlab)("TLAB min: " SIZE_FORMAT " initial: " SIZE_FORMAT " max: " SIZE_FORMAT,
                              min_size(), Thread::current()->tlab().initial_desired_size(), max_size());
}
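
// Where the _target_refills formula above comes from: if the active tlab
// is on average half full at GC, the expected waste per thread per GC is
// desired_size / 2 out of target_refills * desired_size words allocated,
// i.e. a waste fraction of 1 / (2 * target_refills). Equating that to
// TLABWasteTargetPercent / 100 and solving gives
//   target_refills = 100 / (2 * TLABWasteTargetPercent),
// e.g. 50 refills between GCs at a 1% waste target (the usual default).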

size_t ThreadLocalAllocBuffer::initial_desired_size() {
  size_t init_sz = 0;

  if (TLABSize > 0) {
    init_sz = TLABSize / HeapWordSize;
  } else if (global_stats() != NULL) {
    // Initial size is a function of the average number of allocating threads.
    unsigned nof_threads = global_stats()->allocating_threads_avg();

    init_sz = (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize) /
              (nof_threads * target_refills());
    init_sz = align_object_size(init_sz);
  }
  init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
  return init_sz;
}

void ThreadLocalAllocBuffer::print_stats(const char* tag) {
  Log(gc, tlab) log;
  if (!log.is_trace()) {
    return;
  }

  Thread* thrd = myThread();
  size_t waste = _gc_waste + _slow_refill_waste + _fast_refill_waste;
  size_t alloc = _number_of_refills * _desired_size;
  double waste_percent = percent_of(waste, alloc);
  size_t tlab_used = Universe::heap()->tlab_used(thrd);
  log.trace("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
            " desired_size: " SIZE_FORMAT "KB"
            " slow allocs: %d refill waste: " SIZE_FORMAT "B"
            " alloc:%8.5f %8.0fKB refills: %d waste %4.1f%% gc: %dB"
            " slow: %dB fast: %dB",
            tag, p2i(thrd), thrd->osthread()->thread_id(),
            _desired_size / (K / HeapWordSize),
            _slow_allocations, _refill_waste_limit * HeapWordSize,
            _allocation_fraction.average(),
            _allocation_fraction.average() * tlab_used / K,
            _number_of_refills, waste_percent,
            _gc_waste * HeapWordSize,
            _slow_refill_waste * HeapWordSize,
            _fast_refill_waste * HeapWordSize);
}

void ThreadLocalAllocBuffer::verify() {
  HeapWord* p = start();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
}

void ThreadLocalAllocBuffer::set_sample_end() {
  size_t heap_words_remaining = _end - _top;
  size_t bytes_left = bytes_until_sample();
  size_t words_until_sample = bytes_left / HeapWordSize;

  if (heap_words_remaining > words_until_sample) {
    HeapWord* new_end = _top + words_until_sample;
    set_end(new_end);
    set_slow_path_end(new_end);
    set_bytes_until_sample(0);
  } else {
    bytes_left -= heap_words_remaining * HeapWordSize;
    set_bytes_until_sample(bytes_left);
  }
}
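
// How the sample point is armed (hypothetical numbers): with 4096 words
// between _top and _end and 1024 words until the next sample, _end (and
// _slow_path_end) are pulled back to _top + 1024 while _actual_end keeps
// the real end of the tlab. The inlined fast path then appears to run out
// of space at the sample point and falls into the slow path, where
// handle_sample() below can record the allocation. If the sample point
// lies beyond this tlab, the leftover distance is instead carried over in
// _bytes_until_sample.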

void ThreadLocalAllocBuffer::pick_next_sample(size_t diff) {
  if (!HeapMonitoring::enabled()) {
    return;
  }

  if (bytes_until_sample() == 0) {
    HeapMonitoring::pick_next_sample(bytes_until_sample_addr());
  }

  if (diff > 0) {
    // Try to correct sample size by removing extra space from last allocation.
    if (bytes_until_sample() > diff * HeapWordSize) {
      set_bytes_until_sample(bytes_until_sample() - diff * HeapWordSize);
    }
  }

  set_sample_end();

  log_trace(gc, tlab)("TLAB picked next sample: thread: " INTPTR_FORMAT " [id: %2d]"
                      " start: %p top: %p end: %p actual_end: %p slow_path_end: %p",
                      p2i(myThread()), myThread()->osthread()->thread_id(),
                      start(), top(), end(),
                      actual_end(), slow_path_end());
}

Thread* ThreadLocalAllocBuffer::myThread() {
  return (Thread*)(((char *)this) +
                   in_bytes(start_offset()) -
                   in_bytes(Thread::tlab_start_offset()));
}

void ThreadLocalAllocBuffer::set_back_actual_end() {
  // Did a fast TLAB refill occur?
  if (_slow_path_end != _end) {
    // Fix up the actual end to be now the end of this TLAB.
    _slow_path_end = _end;
    _actual_end = _end;
  } else {
    _end = _actual_end;
  }
}

void ThreadLocalAllocBuffer::handle_sample(Thread* thread, HeapWord* result,
                                           size_t size) {
  if (!HeapMonitoring::enabled()) {
    return;
  }

  size_t size_in_bytes = size * HeapWordSize;
  if (bytes_until_sample() > size_in_bytes) {
    set_bytes_until_sample(bytes_until_sample() - size_in_bytes);
  } else {
    // Technically this is not exactly right: ideally we would remember how
    // far past zero we went and reduce the next sample interval accordingly.
    set_bytes_until_sample(0);
  }

  // Should we sample now?
  if (should_sample()) {
    HeapMonitoring::object_alloc_do_sample(thread,
                                           reinterpret_cast<oopDesc*>(result),
                                           size_in_bytes);
    set_back_actual_end();
    pick_next_sample();
  }
}
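
// Note on myThread() above: a TLAB is embedded by value in its owning
// Thread, so no back pointer is stored. "this" plus start_offset() is the
// address of the tlab's _start field; subtracting Thread::tlab_start_offset()
// (the offset of that same field within Thread) yields the Thread base
// address. E.g. (hypothetical layout) if the tlab lives at offset 0x60
// inside Thread, then this == thread_base + 0x60 and the arithmetic above
// undoes exactly that displacement.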

HeapWord* ThreadLocalAllocBuffer::hard_end() {
  // Did a fast TLAB refill occur?
  if (_slow_path_end != _end) {
    // Fix up the actual end to be now the end of this TLAB.
    _slow_path_end = _end;
    _actual_end = _end;
  }

  return _actual_end + alignment_reserve();
}

GlobalTLABStats::GlobalTLABStats() :
  _allocating_threads_avg(TLABAllocationWeight) {

  initialize();

  _allocating_threads_avg.sample(1); // One allocating thread at startup

  if (UsePerfData) {

    EXCEPTION_MARK;
    ResourceMark rm;

    char* cname = PerfDataManager::counter_name("tlab", "allocThreads");
    _perf_allocating_threads =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_None, CHECK);

    cname = PerfDataManager::counter_name("tlab", "fills");
    _perf_total_refills =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_None, CHECK);

    cname = PerfDataManager::counter_name("tlab", "maxFills");
    _perf_max_refills =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_None, CHECK);

    cname = PerfDataManager::counter_name("tlab", "alloc");
    _perf_allocation =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, CHECK);

    cname = PerfDataManager::counter_name("tlab", "gcWaste");
    _perf_gc_waste =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, CHECK);

    cname = PerfDataManager::counter_name("tlab", "maxGcWaste");
    _perf_max_gc_waste =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, CHECK);

    cname = PerfDataManager::counter_name("tlab", "slowWaste");
    _perf_slow_refill_waste =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, CHECK);

    cname = PerfDataManager::counter_name("tlab", "maxSlowWaste");
    _perf_max_slow_refill_waste =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, CHECK);

    cname = PerfDataManager::counter_name("tlab", "fastWaste");
    _perf_fast_refill_waste =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, CHECK);

    cname = PerfDataManager::counter_name("tlab", "maxFastWaste");
    _perf_max_fast_refill_waste =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes, CHECK);

    cname = PerfDataManager::counter_name("tlab", "slowAlloc");
    _perf_slow_allocations =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_None, CHECK);

    cname = PerfDataManager::counter_name("tlab", "maxSlowAlloc");
    _perf_max_slow_allocations =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_None, CHECK);
  }
}

void GlobalTLABStats::initialize() {
  // Clear counters summarizing info from all threads
  _allocating_threads      = 0;
  _total_refills           = 0;
  _max_refills             = 0;
  _total_allocation        = 0;
  _total_gc_waste          = 0;
  _max_gc_waste            = 0;
  _total_slow_refill_waste = 0;
  _max_slow_refill_waste   = 0;
  _total_fast_refill_waste = 0;
  _max_fast_refill_waste   = 0;
  _total_slow_allocations  = 0;
  _max_slow_allocations    = 0;
}

void GlobalTLABStats::publish() {
  _allocating_threads_avg.sample(_allocating_threads);
  if (UsePerfData) {
    _perf_allocating_threads   ->set_value(_allocating_threads);
    _perf_total_refills        ->set_value(_total_refills);
    _perf_max_refills          ->set_value(_max_refills);
    _perf_allocation           ->set_value(_total_allocation);
    _perf_gc_waste             ->set_value(_total_gc_waste);
    _perf_max_gc_waste         ->set_value(_max_gc_waste);
    _perf_slow_refill_waste    ->set_value(_total_slow_refill_waste);
    _perf_max_slow_refill_waste->set_value(_max_slow_refill_waste);
    _perf_fast_refill_waste    ->set_value(_total_fast_refill_waste);
    _perf_max_fast_refill_waste->set_value(_max_fast_refill_waste);
    _perf_slow_allocations     ->set_value(_total_slow_allocations);
    _perf_max_slow_allocations ->set_value(_max_slow_allocations);
  }
}

void GlobalTLABStats::print() {
  Log(gc, tlab) log;
  if (!log.is_debug()) {
    return;
  }

  size_t waste = _total_gc_waste + _total_slow_refill_waste + _total_fast_refill_waste;
  double waste_percent = percent_of(waste, _total_allocation);
  log.debug("TLAB totals: thrds: %d refills: %d max: %d"
            " slow allocs: %d max %d waste: %4.1f%%"
            " gc: " SIZE_FORMAT "B max: " SIZE_FORMAT "B"
            " slow: " SIZE_FORMAT "B max: " SIZE_FORMAT "B"
            " fast: " SIZE_FORMAT "B max: " SIZE_FORMAT "B",
            _allocating_threads,
            _total_refills, _max_refills,
            _total_slow_allocations, _max_slow_allocations,
            waste_percent,
            _total_gc_waste * HeapWordSize,
            _max_gc_waste * HeapWordSize,
            _total_slow_refill_waste * HeapWordSize,
            _max_slow_refill_waste * HeapWordSize,
            _total_fast_refill_waste * HeapWordSize,
            _max_fast_refill_waste * HeapWordSize);
}