/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "gc_implementation/shenandoah/brooksPointer.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

// Global region geometry. Every value is computed exactly once in
// setup_sizes() at VM initialization (each assignment there is protected
// by a "set it once" guarantee) and is immutable afterwards.
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

// Construct a region covering [start, start + size_words). The region starts
// out empty, either committed or uncommitted as requested; its "empty since"
// timestamp is taken at construction.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t size_words, size_t index, bool committed) :
  _heap(heap),
  _region_number(index),
  _live_data(0),
  _reserved(MemRegion(start, size_words)),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _shared_allocs(0),
  _new_top(NULL),
  _critical_pins(0),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _empty_time(os::elapsedTime()),
  _pacer(ShenandoahPacing ? heap->pacer() : NULL) {

  ContiguousSpace::initialize(_reserved, true, committed);
}

size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

// Fatal-error helper for the state machine below: dumps the current state,
// the attempted transition and the region itself, then aborts the VM.
void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal(ss.as_string());
}

// ---------------------------------------------------------------------------
// Region state machine. All transitions require the heap lock; the switch
// statements below use *intentional* fall-throughs, annotated explicitly.
// ---------------------------------------------------------------------------

// Empty (committed or not) -> regular; already-regular/pinned is a no-op.
void ShenandoahHeapRegion::make_regular_allocation() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through: once committed, proceed as empty-committed
    case _empty_committed:
      _state = _regular;
      // fall through: now regular, nothing more to do
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Full-GC-only shortcut: force almost any state back to regular
// (pinned-cset collapses to plain pinned since the cset is dissolved).
void ShenandoahHeapRegion::make_regular_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through: once committed, treat like the states below
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      _state = _regular;
      return;
    case _pinned_cset:
      _state = _pinned;
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

// Empty -> first region of a humongous (multi-region) object.
void ShenandoahHeapRegion::make_humongous_start() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through: once committed, proceed as empty-committed
    case _empty_committed:
      _state = _humongous_start;
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

// Full-GC-only: retag a region as humongous-start while objects are being
// compacted in place (the previous tag may be any non-pinned occupied state).
void ShenandoahHeapRegion::make_humongous_start_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      _state = _humongous_start;
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

// Empty -> continuation region of a humongous object.
void ShenandoahHeapRegion::make_humongous_cont() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through: once committed, proceed as empty-committed
    case _empty_committed:
      _state = _humongous_cont;
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

// Full-GC-only counterpart of make_humongous_start_bypass() for
// continuation regions.
void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      _state = _humongous_cont;
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

// Pin the region (e.g. for a JNI critical section): bump the pin count and
// move to the matching pinned state. Pinning a cset region is only legal
// after the concurrent GC has been cancelled.
void ShenandoahHeapRegion::make_pinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned;
      // fall through: newly-pinned region takes its first pin below
    case _pinned_cset:
    case _pinned:
      _critical_pins++;
      return;
    case _humongous_start:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_humongous_start;
      // fall through: newly-pinned region takes its first pin below
    case _pinned_humongous_start:
      _critical_pins++;
      return;
    case _cset:
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_cset;
      _critical_pins++;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

// Drop one pin; when the count reaches zero, revert to the pre-pin state.
// Unpinning a _regular/_humongous_start region with zero pins is tolerated
// as a no-op (asserted in debug builds).
void ShenandoahHeapRegion::make_unpinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _pinned:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _regular;
      }
      return;
    case _regular:
    case _humongous_start:
      assert (_critical_pins == 0, "sanity");
      return;
    case _pinned_cset:
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _cset;
      }
      return;
    case _pinned_humongous_start:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _humongous_start;
      }
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

// Regular -> collection set member; already-cset is a no-op.
void ShenandoahHeapRegion::make_cset() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      _state = _cset;
      // fall through: now in cset, nothing more to do
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

// Mark region contents as garbage awaiting recycling.
void ShenandoahHeapRegion::make_trash() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      _state = _trash;
      return;
    default:
      report_illegal_transition("trashing");
  }
}

// Trash -> empty-committed; restart the "empty since" clock used by the
// uncommit heuristics.
void ShenandoahHeapRegion::make_empty() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _trash:
      _state = _empty_committed;
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

// Return the committed-but-empty region's memory to the OS.
void ShenandoahHeapRegion::make_uncommitted() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      _state = _empty_uncommitted;
      return;
    default:
      report_illegal_transition("uncommiting");
  }
}

// Full-GC-only: commit an uncommitted region so compaction can use it.
void ShenandoahHeapRegion::make_committed_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      _state = _empty_committed;
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

// Undo the most recent allocation of `size` words by retreating top().
// NOTE(review): always returns true; callers apparently cannot observe
// failure — confirm against the header's contract.
bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  set_top(top() - size);
  return true;
}

// Reset live data to zero with a full fence, so concurrent readers using
// load_acquire (get_live_data_words) observe the cleared value.
void ShenandoahHeapRegion::clear_live_data() {
  OrderAccess::release_store_fence((volatile jint*)&_live_data, 0);
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _shared_allocs = 0;
}

// After bulk operations (e.g. compaction) the split between TLAB/GCLAB/shared
// allocations is unknown; attribute everything used to "shared".
void ShenandoahHeapRegion::reset_alloc_metadata_to_shared() {
  if (used() > 0) {
    _tlab_allocs = 0;
    _gclab_allocs = 0;
    _shared_allocs = used() >> LogHeapWordSize;
  } else {
    reset_alloc_metadata();
  }
}

// Allocation counters are stored in words; accessors return bytes.
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return _shared_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

// Set live data from a byte count; VM-thread only, so a plain store
// (no ordering) is sufficient on the writer side here.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  size_t v = s >> LogHeapWordSize;
  assert(v < (size_t)max_jint, "sanity");
  _live_data = (jint)v;
}

// Reader side of the live-data counter: acquire-load pairs with the
// release store in clear_live_data().
size_t ShenandoahHeapRegion::get_live_data_words() const {
  jint v = OrderAccess::load_acquire((volatile jint*)&_live_data);
  assert(v >= 0, "sanity");
  return (size_t)v;
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

// Garbage = used bytes minus live bytes; live can never exceed used.
size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes(), err_msg("Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
         get_live_data_bytes(), used()));
  size_t result = used() - get_live_data_bytes();
  return result;
}

bool ShenandoahHeapRegion::in_collection_set() const {
  return _heap->region_in_collection_set(_region_number);
}

// One-line diagnostic dump: state tag, bottom/top/end, TAMS pointers for both
// marking contexts, usage breakdown, live data, and pin count.
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_region_number);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(_heap->complete_marking_context()->top_at_mark_start(region_number())),
            p2i(_heap->next_marking_context()->top_at_mark_start(region_number())));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);

  st->cr();
}

// Fill the remainder of the region with a dummy (dead) object so heap
// iteration stays parseable. The first allocate() reserves the Brooks
// forwarding-pointer word that precedes the filler object; the second
// claims the rest of the region for the filler itself.
void ShenandoahHeapRegion::fill_region() {
  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    HeapWord* filler = allocate(BrooksPointer::word_size(), ShenandoahHeap::_alloc_shared);
    HeapWord* obj = allocate(end() - top(), ShenandoahHeap::_alloc_shared);
    _heap->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}

// Walk backwards from this (humongous) region to the _humongous_start
// region that heads the same multi-region object.
ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t reg_num = region_number();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(reg_num > 0, "Sanity");
    reg_num --;
    r = _heap->get_region(reg_num);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

// Recycle a trashed region: clear the space, zap it if requested, reset all
// accounting, rewind C-TAMS, and finally flip the state back to empty.
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::clear(false);
  if (ZapUnusedHeapArea) {
    ContiguousSpace::mangle_unused_area_complete();
  }
  clear_live_data();
  reset_alloc_metadata();

  ShenandoahMarkingContext* const compl_ctx = _heap->complete_marking_context();

  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  compl_ctx->set_top_at_mark_start(region_number(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(compl_ctx->is_bitmap_clear_range(bottom(), end()), "must be clear");

  make_empty();
}

// Find the start of the object (or filler) containing address p by linearly
// scanning objects from the bottom of the region. Each object is preceded by
// a Brooks forwarding-pointer word, hence the word_size() offsets.
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(p), p2i(bottom()), p2i(end())));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    shenandoah_assert_correct(NULL, oop(last));
    return last;
  }
}

// One-time initialization of all static region geometry from the heap sizes
// and the Shenandoah*RegionSize/ShenandoahTargetNumRegions flags. Validates
// the flag combinations, rounds region size down to a power of two, derives
// the humongous threshold and maximum TLAB size, and logs the results.
void ShenandoahHeapRegion::setup_sizes(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;
  static const size_t MIN_NUM_REGIONS = 10;

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K, MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    // User fixed the region size explicitly; still enforce the same limits.
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  if (1 > ShenandoahHumongousThreshold || ShenandoahHumongousThreshold > 100) {
    vm_exit_during_initialization("Invalid -XX:ShenandoahHumongousThreshold option, should be within [1..100]");
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = (size_t(1) << region_size_log);

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = max_heap_size / RegionSizeBytes;

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah
  // about next free size, gets the answer for region #N, goes away for a while, then
  // tries to allocate in region #N, and fail because some other thread have claimed part
  // of the region #N, and then the freeset allocation code has to retire the region #N,
  // before moving the allocation to region #N+1.
  //
  // The worst case realizes when "answer" is "region size", which means it could
  // prematurely retire an entire region. Having smaller TLABs does not fix that
  // completely, but reduces the probability of too wasteful region retirement.
  // With current divisor, we will waste no more than 1/8 of region size in the worst
  // case. This also has a secondary effect on collection set selection: even under
  // the race, the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we can get the fragmented region
  // below the garbage threshold that would never be considered for collection.
  //
  // The whole thing would be mitigated if Elastic TLABs were enabled, but there
  // is no support in this JDK.
  //
  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MIN2(RegionSizeBytes / 8, HumongousThresholdBytes);
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MaxTLABSizeBytes / HeapWordSize;

  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
}

// Commit the region's memory and its marking-bitmap slice; failure of either
// is reported as a Java OOM rather than a VM crash.
void ShenandoahHeapRegion::do_commit() {
  if (!os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!_heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

// Inverse of do_commit(): return region memory and bitmap slice to the OS
// and update the heap's committed-size accounting.
void ShenandoahHeapRegion::do_uncommit() {
  if (!os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!_heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}