1 /* 2 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
 *
 */

#include "precompiled.hpp"

#include "memory/allocation.hpp"
#include "gc_implementation/shenandoah/shenandoahBrooksPointer.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "memory/space.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

// Region geometry globals. All are zero until setup_sizes() computes them once
// during heap initialization; the guarantee(... == 0) checks there enforce the
// set-once discipline.
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

// Construct a region covering [start, start + size_words). The region starts
// out empty, in either the committed or uncommitted state depending on
// whether its backing memory is committed.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t size_words, size_t index, bool committed) :
  _heap(heap),
  _reserved(MemRegion(start, size_words)),
  _region_number(index),
  _new_top(NULL),
  _critical_pins(0),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _shared_allocs(0),
  _live_data(0) {

  ContiguousSpace::initialize(_reserved, true, committed);
}

size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

// Fail fatally with a message naming the attempted transition, the current
// state, and this region's print_on() dump.
void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal(ss.as_string());
}

// State transitions below implement the region state machine. All of them
// must run under the heap lock. The switch fall-throughs are intentional:
// an uncommitted region is first committed, then treated as empty-committed,
// and so on down the chain.

// Make this region available for regular (non-humongous) allocations.
void ShenandoahHeapRegion::make_regular_allocation() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fall through.
    case _empty_committed:
      _state = _regular;
      // Fall through.
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Force the region into the regular state during full GC, where cset and
// humongous regions may be recycled in place.
void ShenandoahHeapRegion::make_regular_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fall through.
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      _state = _regular;
      return;
    case _pinned_cset:
      // Pinned cset region loses its cset-ness, but stays pinned.
      _state = _pinned;
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

// Turn an empty region into the first region of a humongous object.
void ShenandoahHeapRegion::make_humongous_start() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fall through.
    case _empty_committed:
      _state = _humongous_start;
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

// Full-GC variant: a region being reused for a moved humongous object may
// currently be in any of the reusable states.
void ShenandoahHeapRegion::make_humongous_start_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      _state = _humongous_start;
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

// Turn an empty region into a continuation region of a humongous object.
void ShenandoahHeapRegion::make_humongous_cont() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // Fall through.
    case _empty_committed:
      _state = _humongous_cont;
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

// Full-GC variant of make_humongous_cont(), see make_humongous_start_bypass().
void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      _state = _humongous_cont;
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

// Register one critical pin (e.g. JNI GetPrimitiveArrayCritical) on this
// region, switching it to the matching pinned state on the first pin.
// Pins are counted in _critical_pins; make_unpinned() undoes them.
void ShenandoahHeapRegion::make_pinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned;
      // Fall through.
    case _pinned_cset:
    case _pinned:
      _critical_pins++;
      return;
    case _humongous_start:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_humongous_start;
      // Fall through.
    case _pinned_humongous_start:
      _critical_pins++;
      return;
    case _cset:
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_cset;
      _critical_pins++;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

// Drop one critical pin; when the count reaches zero, return the region to
// the state it was pinned from (_regular / _cset / _humongous_start).
void ShenandoahHeapRegion::make_unpinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _pinned:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _regular;
      }
      return;
    case _regular:
    case _humongous_start:
      // Not pinned: nothing to do, but the pin count must be consistent.
      assert (_critical_pins == 0, "sanity");
      return;
    case _pinned_cset:
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _cset;
      }
      return;
    case _pinned_humongous_start:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _humongous_start;
      }
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

// Add this region to the collection set.
void ShenandoahHeapRegion::make_cset() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      _state = _cset;
      // Fall through.
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

// Mark the region as trash: its contents are dead and it awaits recycling.
void ShenandoahHeapRegion::make_trash() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      _state = _trash;
      return;
    default:
      report_illegal_transition("trashing");
  }
}

// Trash a region known to contain no marked objects; additionally tells the
// marking context so that bitmap resets for it can be skipped.
void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // tell marking context about it to bypass bitmap resets.
  _heap->complete_marking_context()->reset_top_bitmap(this);
}

// Recycle a trashed region back to empty-committed, timestamping the moment
// it became empty (used by the uncommit heuristics).
void ShenandoahHeapRegion::make_empty() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _trash:
      _state = _empty_committed;
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

// Uncommit the backing memory of an empty-committed region.
void ShenandoahHeapRegion::make_uncommitted() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      _state = _empty_uncommitted;
      return;
    default:
      report_illegal_transition("uncommiting");
  }
}

// Full-GC variant: commit an uncommitted region without allocating into it.
void ShenandoahHeapRegion::make_committed_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      _state = _empty_committed;
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

// Zero the live-data counter with release semantics, pairing with the
// load_acquire in get_live_data_words().
void ShenandoahHeapRegion::clear_live_data() {
  OrderAccess::release_store_fence((volatile jint*)&_live_data, 0);
}

// Reset all per-region allocation counters (word-sized).
void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _shared_allocs = 0;
}

// Attribute everything currently used in the region to shared allocations,
// e.g. after moves where the TLAB/GCLAB split is no longer meaningful.
void ShenandoahHeapRegion::reset_alloc_metadata_to_shared() {
  if (used() > 0) {
    _tlab_allocs = 0;
    _gclab_allocs = 0;
    _shared_allocs = used() >> LogHeapWordSize;
  } else {
    reset_alloc_metadata();
  }
}

// Allocation counters are stored in words; accessors convert to bytes.
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return _shared_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

// Set live data to s bytes (stored internally as a jint word count).
// Single-writer: only the VM thread updates this.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  size_t v = s >> LogHeapWordSize;
  assert(v < (size_t)max_jint, "sanity");
  _live_data = (jint)v;
}

// Live data in words, read with acquire semantics (pairs with the release
// store in clear_live_data()).
size_t ShenandoahHeapRegion::get_live_data_words() const {
  jint v = OrderAccess::load_acquire((volatile jint*)&_live_data);
  assert(v >= 0, "sanity");
  return (size_t)v;
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

// Garbage is whatever is used but not live.
size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes(), err_msg("Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
         get_live_data_bytes(), used()));
  size_t result = used() - get_live_data_bytes();
  return result;
}

// Print a one-line summary of the region: number, state tag, bottom/top/end,
// TAMS, and the usage/alloc/live/pin counters.
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_region_number);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " INTPTR_FORMAT_W(12),
            p2i(_heap->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);

  st->cr();
}

// Walk backwards from a humongous continuation region to find the
// humongous-start region that heads the object.
ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t reg_num = region_number();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(reg_num > 0, "Sanity");
    reg_num --;
    r = _heap->get_region(reg_num);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

// Recycle a trashed region: clear the space, wipe allocation/live metadata,
// reset TAMS, and transition to empty-committed via make_empty().
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::clear(false);
  if (ZapUnusedHeapArea) {
    ContiguousSpace::mangle_unused_area_complete();
  }
  clear_live_data();
  reset_alloc_metadata();

  _heap->marking_context()->reset_top_at_mark_start(this);

  make_empty();
}

// Find the start of the object (block) containing address p, by linearly
// walking objects from the bottom. Each object is preceded by a Brooks
// forwarding pointer word, hence the word_size() offsets.
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(bottom()), p2i(end())));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom() + ShenandoahBrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + ShenandoahBrooksPointer::word_size();
    }
    shenandoah_assert_correct(NULL, oop(last));
    return last;
  }
}

// Validate the region-size flags, derive the region size (power of two),
// and initialize all static region geometry globals exactly once.
void ShenandoahHeapRegion::setup_sizes(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    // Region size not set explicitly: derive it from heap size, clamped by
    // the min/max region size flags.
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2<size_t>(ShenandoahMinRegionSize, region_size);
    region_size = MIN2<size_t>(ShenandoahMaxRegionSize, region_size);

  } else {
    // Region size set explicitly: validate it against heap size and limits.
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  if (1 > ShenandoahHumongousThreshold || ShenandoahHumongousThreshold > 100) {
    vm_exit_during_initialization("Invalid -XX:ShenandoahHumongousThreshold option, should be within [1..100]");
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, uncommitting one region may falsely uncommit the adjacent
  // regions too.
  // Also see shenandoahArguments.cpp, where it handles UseLargePages.
  if (UseLargePages && ShenandoahUncommit) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = max_heap_size / RegionSizeBytes;

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah
  // about next free size, gets the answer for region #N, goes away for a while, then
  // tries to allocate in region #N, and fail because some other thread have claimed part
  // of the region #N, and then the freeset allocation code has to retire the region #N,
  // before moving the allocation to region #N+1.
  //
  // The worst case realizes when "answer" is "region size", which means it could
  // prematurely retire an entire region. Having smaller TLABs does not fix that
  // completely, but reduces the probability of too wasteful region retirement.
  // With current divisor, we will waste no more than 1/8 of region size in the worst
  // case. This also has a secondary effect on collection set selection: even under
  // the race, the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we can get the fragmented region
  // below the garbage threshold that would never be considered for collection.
  //
  // The whole thing would be mitigated if Elastic TLABs were enabled, but there
  // is no support in this JDK.
  //
  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MIN2(RegionSizeBytes / 8, HumongousThresholdBytes);
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MaxTLABSizeBytes / HeapWordSize;

  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
}

// Commit the region's memory and its marking bitmap slice, and account the
// committed bytes with the heap. Commit failures report Java OOM.
// No-op commit for special (pre-committed) heap regions.
void ShenandoahHeapRegion::do_commit() {
  if (!_heap->is_heap_region_special() && !os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!_heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

// Mirror of do_commit(): uncommit region memory and bitmap slice, and
// decrease the heap's committed accounting.
void ShenandoahHeapRegion::do_uncommit() {
  if (!_heap->is_heap_region_special() && !os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!_heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}