1 /*
   2  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "memory/allocation.hpp"
  27 #include "gc_implementation/shenandoah/shenandoahBrooksPointer.hpp"
  28 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  29 #include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
  31 #include "memory/space.inline.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "memory/universe.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/java.hpp"
  36 #include "runtime/mutexLocker.hpp"
  37 #include "runtime/os.hpp"
  38 #include "runtime/safepoint.hpp"
  39 
// Region geometry and derived thresholds. All are computed exactly once in
// ShenandoahHeapRegion::setup_sizes(), which guards each against double
// initialization; until then they stay zero.
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
  51 
// Construct a region covering [start, start + size_words). Regions begin in
// one of the two "empty" states, depending on whether their backing memory is
// already committed; all later transitions go through the make_*() methods.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t size_words, size_t index, bool committed) :
  _heap(heap),
  _pacer(ShenandoahPacing ? heap->pacer() : NULL), // pacer is only consulted when pacing is enabled
  _reserved(MemRegion(start, size_words)),
  _region_number(index),
  _new_top(NULL),
  _critical_pins(0),
  _empty_time(os::elapsedTime()), // region is empty from birth; timestamp it (also updated in make_empty())
  _state(committed ? _empty_committed : _empty_uncommitted),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _shared_allocs(0),
  _live_data(0) {

  ContiguousSpace::initialize(_reserved, true, committed);
}
  69 
// Index of this region within the heap's region array.
size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}
  73 
// Abort the VM with a diagnostic dump of this region. The make_*() methods
// route every unexpected state transition here, so reaching this code means
// a bug in the GC state machine, not a recoverable condition.
void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm; // scopes the resource-area allocation behind stringStream
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal(ss.as_string());
}
  81 
// Transition this region to "regular" so mutator/GC allocations can proceed.
// Caller must hold the heap lock. The case fall-throughs are deliberate:
// an uncommitted region is committed first, then marked regular.
void ShenandoahHeapRegion::make_regular_allocation() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough: freshly committed region becomes regular
    case _empty_committed:
      _state = _regular;
      // fallthrough: nothing more to do
    case _regular:
    case _pinned: // already usable for allocation; pinning does not block allocs
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}
  96 
// Forcefully transition to "regular", bypassing the usual state checks.
// Only legal during full GC, which rebuilds region metadata wholesale.
// Fall-throughs are deliberate.
void ShenandoahHeapRegion::make_regular_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough: once committed, treat like any other resettable state
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      _state = _regular;
      return;
    case _pinned_cset:
      // Pins must survive full GC; drop only the cset part of the state.
      _state = _pinned;
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}
 120 
// Mark this region as the first region of a humongous object.
// Only an empty region may start a humongous allocation; an uncommitted
// one is committed first (deliberate fallthrough).
void ShenandoahHeapRegion::make_humongous_start() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough
    case _empty_committed:
      _state = _humongous_start;
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}
 133 
// Forcefully mark this region as a humongous start, bypassing the usual
// empty-only restriction. Only legal during full GC, when objects are
// being compacted over existing region contents.
void ShenandoahHeapRegion::make_humongous_start_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      _state = _humongous_start;
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}
 149 
// Mark this region as a continuation region of a humongous object.
// Mirrors make_humongous_start(): only empty regions qualify, with an
// uncommitted region committed first (deliberate fallthrough).
void ShenandoahHeapRegion::make_humongous_cont() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough
    case _empty_committed:
      _state = _humongous_cont;
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}
 162 
// Forcefully mark this region as a humongous continuation. Only legal
// during full GC; mirrors make_humongous_start_bypass().
void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      _state = _humongous_cont;
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}
 178 
// Pin this region (e.g. for a JNI critical section): pinned regions must not
// be moved by the collector. Pins are counted in _critical_pins; the first
// pin performs the state change and then falls through to the increment.
void ShenandoahHeapRegion::make_pinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned;
      // fallthrough: count this pin
    case _pinned_cset:
    case _pinned:
      _critical_pins++;
      return;
    case _humongous_start:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_humongous_start;
      // fallthrough: count this pin
    case _pinned_humongous_start:
      _critical_pins++;
      return;
    case _cset:
      // A cset region can only be pinned after evacuation was cancelled;
      // otherwise it is in flight and pinning would be a race.
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins == 0, "sanity");
      _state = _pinned_cset;
      _critical_pins++;
      return;
    default:
      report_illegal_transition("pinning");
  }
}
 205 
// Drop one pin from this region. Only when the pin count reaches zero does
// the region revert to its unpinned state (_regular, _cset, or
// _humongous_start respectively).
void ShenandoahHeapRegion::make_unpinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _pinned:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _regular;
      }
      return;
    case _regular:
    case _humongous_start:
      // Already unpinned; tolerated, but the pin count must agree.
      assert (_critical_pins == 0, "sanity");
      return;
    case _pinned_cset:
      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _cset;
      }
      return;
    case _pinned_humongous_start:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _humongous_start;
      }
      return;
    default:
      report_illegal_transition("unpinning");
  }
}
 239 
// Add this region to the collection set. Idempotent: a region already in
// the cset stays there (deliberate fallthrough to the shared return).
void ShenandoahHeapRegion::make_cset() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      _state = _cset;
      // fallthrough
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}
 251 
// Mark this region as trash: its contents are dead and the region is
// awaiting recycling (see recycle()).
void ShenandoahHeapRegion::make_trash() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      _state = _trash;
      return;
    default:
      report_illegal_transition("trashing");
  }
}
 268 
// Trash a region known to contain no marked objects. In addition to the
// normal trash transition, shortcut the mark bitmap maintenance.
void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // tell marking context about it to bypass bitmap resets.
  _heap->complete_marking_context()->reset_top_bitmap(this);
}
 276 
// Finish recycling: a trashed region becomes empty (still committed) and
// records the time, so uncommit decisions can be based on how long the
// region has sat idle.
void ShenandoahHeapRegion::make_empty() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _trash:
      _state = _empty_committed;
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}
 288 
// Release the region's backing memory. Only an empty committed region may
// be uncommitted.
void ShenandoahHeapRegion::make_uncommitted() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      _state = _empty_uncommitted;
      return;
    default:
      report_illegal_transition("uncommiting");
  }
}
 300 
// Commit an uncommitted region outside the normal allocation path.
// Only legal during full GC, which may need to slide objects into
// regions that are currently uncommitted.
void ShenandoahHeapRegion::make_committed_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      _state = _empty_committed;
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}
 314 
// Reset the live-data counter. The release fence publishes the zero to
// readers that use load_acquire (see get_live_data_words()).
void ShenandoahHeapRegion::clear_live_data() {
  OrderAccess::release_store_fence((volatile jint*)&_live_data, 0);
}
 318 
 319 void ShenandoahHeapRegion::reset_alloc_metadata() {
 320   _tlab_allocs = 0;
 321   _gclab_allocs = 0;
 322   _shared_allocs = 0;
 323 }
 324 
 325 void ShenandoahHeapRegion::reset_alloc_metadata_to_shared() {
 326   if (used() > 0) {
 327     _tlab_allocs = 0;
 328     _gclab_allocs = 0;
 329     _shared_allocs = used() >> LogHeapWordSize;
 330   } else {
 331     reset_alloc_metadata();
 332   }
 333 }
 334 
 335 size_t ShenandoahHeapRegion::get_shared_allocs() const {
 336   return _shared_allocs * HeapWordSize;
 337 }
 338 
 339 size_t ShenandoahHeapRegion::get_tlab_allocs() const {
 340   return _tlab_allocs * HeapWordSize;
 341 }
 342 
 343 size_t ShenandoahHeapRegion::get_gclab_allocs() const {
 344   return _gclab_allocs * HeapWordSize;
 345 }
 346 
 347 void ShenandoahHeapRegion::set_live_data(size_t s) {
 348   assert(Thread::current()->is_VM_thread(), "by VM thread");
 349   size_t v = s >> LogHeapWordSize;
 350   assert(v < (size_t)max_jint, "sanity");
 351   _live_data = (jint)v;
 352 }
 353 
// Live data in words. load_acquire pairs with the release_store_fence in
// clear_live_data(), so concurrent readers see a consistent value.
size_t ShenandoahHeapRegion::get_live_data_words() const {
  jint v = OrderAccess::load_acquire((volatile jint*)&_live_data);
  assert(v >= 0, "sanity");
  return (size_t)v;
}
 359 
 360 size_t ShenandoahHeapRegion::get_live_data_bytes() const {
 361   return get_live_data_words() * HeapWordSize;
 362 }
 363 
 364 bool ShenandoahHeapRegion::has_live() const {
 365   return get_live_data_words() != 0;
 366 }
 367 
 368 size_t ShenandoahHeapRegion::garbage() const {
 369   assert(used() >= get_live_data_bytes(), err_msg("Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
 370          get_live_data_bytes(), used()));
 371   size_t result = used() - get_live_data_bytes();
 372   return result;
 373 }
 374 
// Print one table row describing this region: index, state tag, the
// bottom/top/end addresses (BTE), TAMS, and the usage/allocation/liveness
// counters. Used both for heap reports and for illegal-transition dumps.
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_region_number);

  // Short state tag; one entry per region state.
  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " INTPTR_FORMAT_W(12),
            p2i(_heap->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);

  st->cr();
}
 426 
 427 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
 428   assert(is_humongous(), "Must be a part of the humongous region");
 429   size_t reg_num = region_number();
 430   ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
 431   while (!r->is_humongous_start()) {
 432     assert(reg_num > 0, "Sanity");
 433     reg_num --;
 434     r = _heap->get_region(reg_num);
 435     assert(r->is_humongous(), "Must be a part of the humongous region");
 436   }
 437   assert(r->is_humongous_start(), "Must be");
 438   return r;
 439 }
 440 
// Recycle a trashed region back into the free set: clear the space,
// optionally mangle it for debugging, reset allocation/liveness metadata
// and TAMS, then transition to empty via make_empty().
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::clear(false);
  if (ZapUnusedHeapArea) {
    ContiguousSpace::mangle_unused_area_complete();
  }
  clear_live_data();
  reset_alloc_metadata();

  _heap->marking_context()->reset_top_at_mark_start(this);

  make_empty();
}
 453 
// Find the start of the object containing address p, or top() if p is in the
// unallocated tail. Objects are laid out back-to-back, each preceded by a
// Brooks forwarding-pointer word, so we linearly walk from the bottom,
// stepping over each object plus its forwarding word, until we pass p.
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(bottom()), p2i(end())));
  if (p >= top()) {
    return top();
  } else {
    // First object starts one forwarding word above bottom().
    HeapWord* last = bottom() + ShenandoahBrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + ShenandoahBrooksPointer::word_size();
    }
    shenandoah_assert_correct(NULL, oop(last));
    return last;
  }
}
 471 
// One-time computation of region geometry from heap size and the Shenandoah
// region-size flags. Validates the flag combinations (exiting the VM with a
// diagnostic on bad input), picks a power-of-two region size, and fills in
// all the static geometry/threshold fields declared at the top of this file.
void ShenandoahHeapRegion::setup_sizes(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    // Region size not set explicitly: derive it from heap size, within the
    // [ShenandoahMinRegionSize, ShenandoahMaxRegionSize] bounds.
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2<size_t>(ShenandoahMinRegionSize, region_size);
    region_size = MIN2<size_t>(ShenandoahMaxRegionSize, region_size);

  } else {
    // Explicit region size: validate it against heap size and the min/max bounds.
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                              "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  if (1 > ShenandoahHumongousThreshold || ShenandoahHumongousThreshold > 100) {
    vm_exit_during_initialization("Invalid -XX:ShenandoahHumongousThreshold option, should be within [1..100]");
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, uncommitting one region may falsely uncommit the adjacent
  // regions too.
  // Also see shenandoahArguments.cpp, where it handles UseLargePages.
  if (UseLargePages && ShenandoahUncommit) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = max_heap_size / RegionSizeBytes;

  // Humongous threshold: objects at or above this fraction of a region
  // are allocated as humongous.
  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah
  // about next free size, gets the answer for region #N, goes away for a while, then
  // tries to allocate in region #N, and fail because some other thread have claimed part
  // of the region #N, and then the freeset allocation code has to retire the region #N,
  // before moving the allocation to region #N+1.
  //
  // The worst case realizes when "answer" is "region size", which means it could
  // prematurely retire an entire region. Having smaller TLABs does not fix that
  // completely, but reduces the probability of too wasteful region retirement.
  // With current divisor, we will waste no more than 1/8 of region size in the worst
  // case. This also has a secondary effect on collection set selection: even under
  // the race, the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we can get the fragmented region
  // below the garbage threshold that would never be considered for collection.
  //
  // The whole thing would be mitigated if Elastic TLABs were enabled, but there
  // is no support in this JDK.
  //
  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MIN2(RegionSizeBytes / 8, HumongousThresholdBytes);
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MaxTLABSizeBytes / HeapWordSize;

  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
}
 613 
// Commit the region's backing memory and its slice of the mark bitmaps,
// then update the heap's committed-memory accounting. Commit failure is
// reported as a Java OOM rather than a VM crash.
void ShenandoahHeapRegion::do_commit() {
  if (!os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!_heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}
 623 
// Mirror of do_commit(): release the region's backing memory and its mark
// bitmap slice, and update the committed-memory accounting.
void ShenandoahHeapRegion::do_uncommit() {
  if (!os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!_heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}