/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(NULL),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.as_string());
}

void ShenandoahHeapRegion::make_regular_allocation() {
  shenandoah_assert_heaplocked();

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // so tell the marking context about it to bypass bitmap resets.
  ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
}

size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " INTPTR_FORMAT_W(12),
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " INTPTR_FORMAT_W(12),
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();
}

void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
  if (!is_active()) return;
  if (is_humongous()) {
    oop_iterate_humongous(blk);
  } else {
    oop_iterate_objects(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
  assert(! is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  set_top(bottom());
  clear_live_data();

  reset_alloc_metadata();

  ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();

  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    shenandoah_assert_correct(NULL, oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

void ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize),  proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize),  proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, uncommitting one region may falsely uncommit the adjacent
  // regions too.
  // Also see shenandoahArguments.cpp, where it handles UseLargePages.
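  // Illustrative only (the actual large page size is platform-dependent): with
  // 2 MB large pages, a region_size computed as 1 MB above would be raised to
  // 2 MB by the MAX2 below, so each region covers at least one large page.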
  if (UseLargePages && ShenandoahUncommit) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
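  // For example (illustrative numbers only): a computed region_size of 6 MB
  // has log2_long == 22, so the shift below rounds it down to 2^22 == 4 MB.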
  region_size = size_t(1) << region_size_log;

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = max_heap_size / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment);
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // the TLAB allocation machinery. It may happen that the TLAB sizing policy polls
  // Shenandoah about the next free size, gets the answer for region #N, goes away
  // for a while, then tries to allocate in region #N, and fails because some other
  // thread has claimed part of region #N in the meantime. The freeset allocation
  // code then has to retire region #N before moving the allocation to region #N+1.
  //
  // The worst case occurs when the "answer" is "region size", which means it could
  // prematurely retire an entire region. Having smaller TLABs does not fix that
  // completely, but reduces the probability of too wasteful region retirement.
  // With the current divisor, we will waste no more than 1/8 of the region size in
  // the worst case. This also has a secondary effect on collection set selection:
  // even under the race, the regions would be at least 7/8 used, which allows relying
  // on "used" - "live" for cset selection. Otherwise, we can get a fragmented region
  // below the garbage threshold that would never be considered for collection.
  //
  // The whole thing is mitigated if Elastic TLABs are enabled.
  //
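  // As a worked example (illustrative numbers, not defaults): with 4 MB regions
  // and 8-byte heap words, RegionSizeWords is 512K words, so without elastic
  // TLABs the RegionSizeWords / 8 term below caps a TLAB at 64K words (512 KB),
  // i.e. a premature retirement wastes at most 1/8 of the region.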
  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ? RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords);
  MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
}

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}