/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shared/space.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::PaddedAllocSeqNum ShenandoahHeapRegion::_alloc_seq_num;

ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t size_words, size_t index, bool committed) :
  _heap(heap),
  _reserved(MemRegion(start, size_words)),
  _region_number(index),
  _new_top(NULL),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _shared_allocs(0),
  _seqnum_first_alloc_mutator(0),
  _seqnum_first_alloc_gc(0),
  _seqnum_last_alloc_mutator(0),
  _seqnum_last_alloc_gc(0),
  _live_data(0),
  _critical_pins(0) {

  ContiguousSpace::initialize(_reserved, true, committed);
}

size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.as_string());
}

void ShenandoahHeapRegion::make_regular_allocation() {
  _heap->assert_heaplock_owned_by_current_thread();

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress() || _heap->is_degenerated_gc_in_progress(),
          "only for full or degen GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      set_state(_pinned_cset);
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region, so
  // tell the marking context about it, which lets it bypass the bitmap resets.
  _heap->complete_marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::clear_live_data() {
  Atomic::release_store_fence(&_live_data, (size_t)0);
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _shared_allocs = 0;
  _seqnum_first_alloc_mutator = 0;
  _seqnum_last_alloc_mutator = 0;
  _seqnum_first_alloc_gc = 0;
  _seqnum_last_alloc_gc = 0;
}

void ShenandoahHeapRegion::reset_alloc_metadata_to_shared() {
  if (used() > 0) {
    _tlab_allocs = 0;
    _gclab_allocs = 0;
    _shared_allocs = used() >> LogHeapWordSize;
    uint64_t next = _alloc_seq_num.value++;
    _seqnum_first_alloc_mutator = next;
    _seqnum_last_alloc_mutator = next;
    _seqnum_first_alloc_gc = 0;
    _seqnum_last_alloc_gc = 0;
  } else {
    reset_alloc_metadata();
  }
}

size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return _shared_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

size_t ShenandoahHeapRegion::get_live_data_words() const {
  return Atomic::load_acquire(&_live_data);
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes(), "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
         get_live_data_bytes(), used());

  size_t result = used() - get_live_data_bytes();
  return result;
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_region_number);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " INTPTR_FORMAT_W(12),
            p2i(_heap->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->print("|SN " UINT64_FORMAT_X_W(12) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8) ", " UINT64_FORMAT_X_W(8),
            seqnum_first_alloc_mutator(), seqnum_last_alloc_mutator(),
            seqnum_first_alloc_gc(), seqnum_last_alloc_gc());
  st->cr();
}

void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
  if (!is_active()) return;
  if (is_humongous()) {
    oop_iterate_humongous(blk);
  } else {
    oop_iterate_objects(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
  assert(! is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t reg_num = region_number();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(reg_num > 0, "Sanity");
    reg_num --;
    r = _heap->get_region(reg_num);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::clear(false);
  if (ZapUnusedHeapArea) {
    ContiguousSpace::mangle_unused_area_complete();
  }
  clear_live_data();

  reset_alloc_metadata();

  _heap->marking_context()->reset_top_at_mark_start(this);

  make_empty();
}

HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    shenandoah_assert_correct(NULL, oop(last));
    return last;
  }
}

void ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than the minimum TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahHeapRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize),  proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahHeapRegionSize), proper_unit_for_byte_size(ShenandoahHeapRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize),  proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, uncommitting one region may falsely uncommit the adjacent
  // regions too.
  // Also see shenandoahArguments.cpp, where it handles UseLargePages.
  if (UseLargePages && ShenandoahUncommit) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;
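  // Illustrative arithmetic (hypothetical numbers, assuming the default
  // ShenandoahTargetNumRegions of 2048): a 10G max heap divides to 5M, and the
  // log2/shift rounding above trims that to the nearest lower power of two, 4M;
  // an 8G max heap divides to exactly 4M and is kept as-is.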

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = max_heap_size / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment);
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");
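  // Sketch of the effect (assuming the default ShenandoahHumongousThreshold of
  // 100): the threshold equals the full region size, so only allocations larger
  // than a single region take the humongous path; a lower percentage would also
  // divert large-but-sub-region allocations into humongous regions.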

  // The rationale for trimming the TLAB sizes has to do with the raciness in the
  // TLAB allocation machinery. It may happen that the TLAB sizing policy polls Shenandoah
  // about the next free size, gets the answer for region #N, goes away for a while, then
  // tries to allocate in region #N, and fails because some other thread has claimed part
  // of region #N, and then the freeset allocation code has to retire region #N
  // before moving the allocation to region #N+1.
  //
  // The worst case is realized when the "answer" is "region size", which means it could
  // prematurely retire an entire region. Having smaller TLABs does not fix that
  // completely, but reduces the probability of too wasteful region retirement.
  // With the current divisor, we will waste no more than 1/8 of the region size in the worst
  // case. This also has a secondary effect on collection set selection: even under
  // the race, the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we can get a fragmented region
  // below the garbage threshold that would never be considered for collection.
  //
  // The whole thing is mitigated if Elastic TLABs are enabled.
  //
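  // A worked example (illustrative only, assuming 8-byte heap words and a 4M
  // region): RegionSizeWords is 512K, so with elastic TLABs disabled
  // MaxTLABSizeWords = MIN2(512K / 8, HumongousThresholdWords) = 64K words,
  // i.e. 512K bytes, capping the worst-case waste at 1/8 of the region.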
  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ? RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords);
  MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
}

void ShenandoahHeapRegion::do_commit() {
  if (!_heap->is_heap_region_special() && !os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!_heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  if (!_heap->is_heap_region_special() && !os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!_heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned)region_number());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", region_number());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}