1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 #include "gc/shenandoah/brooksPointer.hpp"
  27 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.hpp"
  29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  30 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  31 #include "gc/shared/space.inline.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "runtime/java.hpp"
  35 #include "runtime/mutexLocker.hpp"
  36 #include "runtime/os.hpp"
  37 #include "runtime/safepoint.hpp"
  38 
// Global region geometry, computed exactly once by setup_heap_region_size()
// during heap initialization. Zero means "not yet initialized"; the setup
// code guarantees each is set only once.
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;

// Global allocation sequence counter, stamped into regions on allocation
// transitions. Start with 1, reserve 0 for uninitialized value.
uint64_t ShenandoahHeapRegion::AllocSeqNum = 1;
  51 
// Construct a region covering [start, start + size_words) at the given index.
// The region starts out empty; 'committed' says whether the backing memory is
// already committed, which selects the initial state and whether the
// underlying ContiguousSpace may touch (clear/mangle) the memory.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t size_words, size_t index, bool committed) :
  _heap(heap),
  _region_number(index),
  _live_data(0),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _shared_allocs(0),
  _reserved(MemRegion(start, size_words)),
  _root(false),
  _new_top(NULL),
  _first_alloc_seq_num(0),   // 0 == never allocated in (see AllocSeqNum)
  _last_alloc_seq_num(0),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _empty_time(os::elapsedTime()),
  _critical_pins(0) {

  // Only clear/mangle the space if memory is actually committed.
  ContiguousSpace::initialize(_reserved, true, committed);
}
  71 
  72 size_t ShenandoahHeapRegion::region_number() const {
  73   return _region_number;
  74 }
  75 
// Transition this region to the _regular state for mutator/GC allocation.
// Requires the heap lock. The switch uses deliberate fall-through: an
// uncommitted region is committed first, then an empty region records its
// first-allocation sequence number before becoming regular.
void ShenandoahHeapRegion::make_regular_allocation() {
  _heap->assert_heaplock_owned_by_current_thread();

  // Stamp the last-allocation sequence number on every call, even if the
  // region is already regular/pinned.
  _last_alloc_seq_num = AllocSeqNum++;

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
      assert(_first_alloc_seq_num == 0, "Sanity");
      _first_alloc_seq_num = _last_alloc_seq_num;
      _state = _regular;
      // fall through
    case _regular:
    case _pinned:
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_regular));
  }
}
  97 
// Force this region to _regular, bypassing the usual transition rules.
// Only legal during full GC, which may need to reuse cset regions directly.
// Requires the heap lock. Deliberate switch fall-through, as in
// make_regular_allocation(), with the extra _cset entry point.
void ShenandoahHeapRegion::make_regular_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  _last_alloc_seq_num = AllocSeqNum++;

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
      assert(_first_alloc_seq_num == 0, "Sanity");
      _first_alloc_seq_num = _last_alloc_seq_num;
      // fall through: empty regions also need the state flip below
    case _cset:
      _state = _regular;
      // fall through
    case _regular:
    case _pinned:
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_regular));
  }
}
 121 
// Transition an empty region into the head region of a humongous object.
// Requires the heap lock. Only empty regions may become humongous starts;
// the uncommitted case falls through after committing memory.
void ShenandoahHeapRegion::make_humongous_start() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
      assert(_first_alloc_seq_num == 0, "Sanity");
      _last_alloc_seq_num = AllocSeqNum++;
      _first_alloc_seq_num = _last_alloc_seq_num;
      _state = _humongous_start;
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_humongous_start));
  }
}
 139 
 140 void ShenandoahHeapRegion::make_humongous_cont() {
 141   _heap->assert_heaplock_owned_by_current_thread();
 142   switch (_state) {
 143     case _empty_uncommitted:
 144       do_commit();
 145     case _empty_committed:
 146       _state = _humongous_cont;
 147       assert(_first_alloc_seq_num == 0, "Sanity");
 148       _last_alloc_seq_num = AllocSeqNum++;
 149       _first_alloc_seq_num = _last_alloc_seq_num;
 150       return;
 151     default:
 152       fatal("Disallowed transition from %s to %s",
 153             region_state_to_string(_state),
 154             region_state_to_string(_humongous_cont));
 155   }
 156 }
 157 
 158 
// Pin this region (e.g. for JNI critical sections), preventing evacuation.
// Requires the heap lock. Pins are counted: a regular region becomes _pinned
// on the first pin (note the deliberate fall-through into the counter bump),
// and subsequent pins just increment the count. Humongous regions never move,
// so pinning them is a no-op and the counter stays at zero.
void ShenandoahHeapRegion::make_pinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned;
      // fall through
    case _pinned:
      _critical_pins++;
      return;
    case _humongous_start:
    case _humongous_cont:
      // Humongous objects do not move, and thus pinning is no-op.
      assert (_critical_pins == 0, "sanity");
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_pinned));
  }
}
 179 
// Drop one pin from this region; the region reverts to _regular only when
// the last pin is released. Requires the heap lock. Unpinning a _regular or
// humongous region is tolerated as a no-op (counter must already be zero) —
// this mirrors make_pinned() treating humongous pins as no-ops.
void ShenandoahHeapRegion::make_unpinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _pinned:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _regular;
      }
      return;
    case _regular:
      assert (_critical_pins == 0, "sanity");
      return;
    case _humongous_start:
    case _humongous_cont:
      // Humongous objects do not move, and thus pinning is no-op.
      assert (_critical_pins == 0, "sanity");
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_regular));
  }
}
 204 
// Add this region to the collection set. Requires the heap lock. Only
// regular regions are eligible; re-adding a cset region is a no-op
// (deliberate fall-through).
void ShenandoahHeapRegion::make_cset() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      _state = _cset;
      // fall through
    case _cset:
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_cset));
  }
}
 218 
// Mark this region as reclaimed garbage (trash), awaiting recycling.
// Requires the heap lock. All three source states share the same action
// (deliberate fall-through); the comments label each reclamation path.
void ShenandoahHeapRegion::make_trash() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      _state = _trash;
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_trash));
  }
}
 237 
// Recycle a trash region back to empty (still committed). Requires the heap
// lock. Records the time the region became empty, which the uncommit
// heuristics consult later. Already-empty regions are a no-op (fall-through).
void ShenandoahHeapRegion::make_empty_committed() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _trash:
      _state = _empty_committed;
      _empty_time = os::elapsedTime();
      // fall through
    case _empty_committed:
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_empty_committed));
  }
}
 252 
// Uncommit an empty-committed region's backing memory. Requires the heap
// lock. Returns true if memory was actually uncommitted by this call,
// false if the region was already uncommitted.
bool ShenandoahHeapRegion::make_empty_uncommitted() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      _state = _empty_uncommitted;
      return true;
    case _empty_uncommitted:
      return false;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_empty_uncommitted));
      return false;  // unreachable; keeps compilers happy after fatal()
  }
}
 269 
// Undo the most recent allocation of 'size' heap words by retreating top().
// Caller must ensure the rolled-back allocation is indeed the last one in
// this region. Always succeeds (returns true).
bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  set_top(top() - size);
  return true;
}
 274 
// Reset the live-data counter to zero with a full fence, so that concurrent
// readers (via load_acquire in get_live_data_words) observe the cleared value.
void ShenandoahHeapRegion::clear_live_data() {
  OrderAccess::release_store_fence(&_live_data, 0);
}
 278 
 279 void ShenandoahHeapRegion::reset_alloc_stats() {
 280   _tlab_allocs = 0;
 281   _gclab_allocs = 0;
 282   _shared_allocs = 0;
 283 }
 284 
// Reattribute all current usage of the region to shared allocations, zeroing
// the TLAB/GCLAB counters. Used when the precise breakdown is unknown
// (used() is in bytes; counters are kept in words, hence the shift).
void ShenandoahHeapRegion::reset_alloc_stats_to_shared() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _shared_allocs = used() >> LogHeapWordSize;
}
 290 
// Shared (non-LAB) allocations in this region, converted from words to bytes.
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return _shared_allocs * HeapWordSize;
}
 294 
// TLAB allocations in this region, converted from words to bytes.
size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}
 298 
// GCLAB allocations in this region, converted from words to bytes.
size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}
 302 
// Set the live-data amount for this region; 's' is in bytes, stored in words.
// VM-thread only, so a plain store suffices (no release needed here).
// NOTE(review): the jint cast truncates for region live sizes >= 2^31 words —
// presumably safe given region size limits, but worth confirming.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (jint) (s >> LogHeapWordSize);
}
 307 
// Live data in this region, in heap words. For humongous regions, liveness
// is all-or-nothing: if the (head) region has any live data recorded, the
// whole region counts as fully live (used()); continuation regions delegate
// the liveness check to their head region. Regular regions read the counter
// with acquire semantics to pair with clear_live_data's release fence.
size_t ShenandoahHeapRegion::get_live_data_words() const {
  if (is_humongous()) {
    if (is_humongous_start()) {
      size_t live_data = (size_t)OrderAccess::load_acquire(&_live_data);
      return (live_data == 0) ? 0 : (used() >> LogHeapWordSize);
    } else {
      const ShenandoahHeapRegion* start = humongous_start_region();
      return start->get_live_data_words() == 0 ? 0 : (used() >> LogHeapWordSize);
    }
  } else {
    return (size_t)OrderAccess::load_acquire(&_live_data);
  }
}
 321 
// Live data in this region, converted from words to bytes.
size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}
 325 
 326 bool ShenandoahHeapRegion::has_live() const {
 327   return get_live_data_words() != 0;
 328 }
 329 
 330 size_t ShenandoahHeapRegion::garbage() const {
 331   assert(used() >= get_live_data_bytes(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
 332          get_live_data_bytes(), used());
 333 
 334   size_t result = used() - get_live_data_bytes();
 335   return result;
 336 }
 337 
// True iff this region is currently in the collection set; the membership
// is tracked on the heap side, keyed by region number.
bool ShenandoahHeapRegion::in_collection_set() const {
  return _heap->region_in_collection_set(_region_number);
}
 341 
// Print a one-line, pipe-delimited summary of this region: address, number,
// state tag, bottom/top/end, usage percentages (used, TLAB, GCLAB, shared,
// live), first/last allocation timestamps, root flag, pin count, and TAMS
// pointers. Used by heap region dumps.
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|" PTR_FORMAT, p2i(this));
  st->print("|" SIZE_FORMAT_W(5), this->_region_number);

  // Two-character state tag.
  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU");
      break;
    case _empty_committed:
      st->print("|EC");
      break;
    case _regular:
      st->print("|R ");
      break;
    case _humongous_start:
      st->print("|H ");
      break;
    case _humongous_cont:
      st->print("|HC");
      break;
    case _cset:
      st->print("|CS");
      break;
    case _trash:
      st->print("|T ");
      break;
    case _pinned:
      st->print("|P ");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  // Usage breakdowns as integer percentages of region capacity.
  st->print("|U %3d%%", (int) ((double) used() * 100 / capacity()));
  st->print("|T %3d%%", (int) ((double) get_tlab_allocs() * 100 / capacity()));
  st->print("|G %3d%%", (int) ((double) get_gclab_allocs() * 100 / capacity()));
  st->print("|S %3d%%", (int) ((double) get_shared_allocs() * 100 / capacity()));
  st->print("|L %3d%%", (int) ((double) get_live_data_bytes() * 100 / capacity()));
  st->print("|FTS " UINT64_FORMAT_W(15), first_alloc_seq_num());
  st->print("|LTS " UINT64_FORMAT_W(15), last_alloc_seq_num());
  if (is_root()) {
    st->print("|R");
  } else {
    st->print("| ");
  }
  st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);

  // Complete and next top-at-mark-start pointers for this region's bottom.
  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "|",
               p2i(ShenandoahHeap::heap()->complete_top_at_mark_start(_bottom)),
               p2i(ShenandoahHeap::heap()->next_top_at_mark_start(_bottom)));
}
 394 
// Apply 'blk' to every object in [bottom, top). Each object is preceded by a
// Brooks forwarding pointer slot, so iteration starts past the first slot and
// skips one slot per object. If 'allow_cancel' is set, iteration stops early
// when concurrent GC cancellation is observed.
void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
  HeapWord* p = bottom() + BrooksPointer::word_size();
  while (p < top() && !(allow_cancel && _heap->cancelled_concgc())) {
    blk->do_object(oop(p));
    p += oop(p)->size() + BrooksPointer::word_size();
  }
}
 402 
// Carefully iterate objects up to the concurrent-iteration safe limit,
// skipping the Brooks pointer slot before each object. The closure returns
// the object size, or 0 to signal failure; on failure, return the address of
// the offending object. Returns NULL when the whole range was processed.
HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord * limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    }
    p += size + BrooksPointer::word_size();
  }
  return NULL; // all done
}
 415 
 416 void ShenandoahHeapRegion::oop_iterate(ExtendedOopClosure* blk) {
 417   if (!is_active()) return;
 418   if (is_humongous()) {
 419     oop_iterate_humongous(blk);
 420   } else {
 421     oop_iterate_objects(blk);
 422   }
 423 }
 424 
// Apply the oop closure to every object in a non-humongous region, walking
// from bottom to top and skipping the Brooks pointer slot before each object.
void ShenandoahHeapRegion::oop_iterate_objects(ExtendedOopClosure* blk) {
  assert(! is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom() + BrooksPointer::word_size();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    // oop_iterate_size both applies the closure and returns the object size.
    obj_addr += obj->oop_iterate_size(blk) + BrooksPointer::word_size();
  }
}
 435 
// Apply the oop closure to the portion of the humongous object that overlaps
// THIS region. The object header lives in the start region; the MemRegion
// argument restricts iteration to this region's [bottom, top) slice.
void ShenandoahHeapRegion::oop_iterate_humongous(ExtendedOopClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = oop(r->bottom() + BrooksPointer::word_size());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}
 444 
// Fill the remaining free space of this region with a dummy object so the
// region parses cleanly as a sequence of objects. Skipped when the leftover
// space is too small to hold a Brooks pointer plus a minimal filler.
void ShenandoahHeapRegion::fill_region() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    // First allocation reserves the Brooks pointer slot for the filler object
    // (the returned address itself is not needed).
    HeapWord* filler = allocate(BrooksPointer::word_size(), ShenandoahHeap::_alloc_shared);
    // Second allocation claims everything up to end() for the filler body.
    HeapWord* obj = allocate(end() - top(), ShenandoahHeap::_alloc_shared);
    sh->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}
 455 
 456 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
 457   assert(is_humongous(), "Must be a part of the humongous region");
 458   size_t reg_num = region_number();
 459   ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
 460   while (!r->is_humongous_start()) {
 461     assert(reg_num > 0, "Sanity");
 462     reg_num --;
 463     r = _heap->regions()->get(reg_num);
 464     assert(r->is_humongous(), "Must be a part of the humongous region");
 465   }
 466   assert(r->is_humongous_start(), "Must be");
 467   return r;
 468 }
 469 
// Recycle this region back to empty-committed, without touching the
// connection matrix: clear the space, liveness, root flag, allocation stats
// and sequence numbers, and reset the complete-TAMS pointer.
void ShenandoahHeapRegion::recycle_no_matrix() {
  // 'false' = do not mangle here; mangling is done separately below if enabled.
  ContiguousSpace::clear(false);
  if (ZapUnusedHeapArea) {
    ContiguousSpace::mangle_unused_area_complete();
  }
  clear_live_data();
  _root = false;
  reset_alloc_stats();

  // Reset seq numbers
  _first_alloc_seq_num = 0;
  _last_alloc_seq_num = 0;

  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  _heap->set_complete_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");

  make_empty_committed();
}
 491 
 492 void ShenandoahHeapRegion::recycle() {
 493   recycle_no_matrix();
 494   if (UseShenandoahMatrix) {
 495     _heap->connection_matrix()->clear_region(region_number());
 496   }
 497 }
 498 
 499 HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
 500   assert(MemRegion(bottom(), end()).contains(p),
 501          "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
 502          p2i(p), p2i(bottom()), p2i(end()));
 503   if (p >= top()) {
 504     return top();
 505   } else {
 506     HeapWord* last = bottom() + BrooksPointer::word_size();
 507     HeapWord* cur = last;
 508     while (cur <= p) {
 509       last = cur;
 510       cur += oop(cur)->size() + BrooksPointer::word_size();
 511     }
 512     assert(oopDesc::is_oop(oop(last)),
 513            PTR_FORMAT" should be an object start", p2i(last));
 514     return last;
 515   }
 516 }
 517 
 518 void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
 519   // Absolute minimums we should not ever break.
 520   // Minimum region size should be to fit at least one page for mark bitmap for the region,
 521   // or at least 256K, whatever is larger.
 522   static const size_t MIN_REGION_SIZE = MAX2(256*K, os::vm_page_size() * MarkBitMap::heap_map_factor());
 523   static const size_t MIN_NUM_REGIONS = 10;
 524 
 525   if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
 526     FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
 527   }
 528 
 529   uintx region_size;
 530   if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
 531     if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
 532       err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
 533                       "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
 534                       initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
 535       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 536     }
 537     if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
 538       err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
 539                       ShenandoahMinRegionSize/K,  MIN_REGION_SIZE/K);
 540       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 541     }
 542     if (ShenandoahMinRegionSize < MinTLABSize) {
 543       err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
 544                       ShenandoahMinRegionSize/K,  MinTLABSize/K);
 545       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 546     }
 547     if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
 548       err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
 549                       ShenandoahMaxRegionSize/K,  MIN_REGION_SIZE/K);
 550       vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
 551     }
 552     if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
 553       err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
 554                       ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
 555       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
 556     }
 557     size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
 558     region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
 559                        ShenandoahMinRegionSize);
 560 
 561     // Now make sure that we don't go over or under our limits.
 562     region_size = MAX2(ShenandoahMinRegionSize, region_size);
 563     region_size = MIN2(ShenandoahMaxRegionSize, region_size);
 564 
 565   } else {
 566     if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
 567       err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
 568                               "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
 569                       initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
 570       vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
 571     }
 572     if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
 573       err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
 574                       ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
 575       vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
 576     }
 577     if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
 578       err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
 579                       ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
 580       vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
 581     }
 582     region_size = ShenandoahHeapRegionSize;
 583   }
 584 
 585   // Make sure region size is at least one large page, if enabled.
 586   // Otherwise, mem-protecting one region may falsely protect the adjacent
 587   // regions too.
 588   if (UseLargePages) {
 589     region_size = MAX2(region_size, os::large_page_size());
 590   }
 591 
 592   int region_size_log = log2_long((jlong) region_size);
 593   // Recalculate the region size to make sure it's a power of
 594   // 2. This means that region_size is the largest power of 2 that's
 595   // <= what we've calculated so far.
 596   region_size = ((uintx)1 << region_size_log);
 597 
 598   // Now, set up the globals.
 599   guarantee(RegionSizeBytesShift == 0, "we should only set it once");
 600   RegionSizeBytesShift = (size_t)region_size_log;
 601 
 602   guarantee(RegionSizeWordsShift == 0, "we should only set it once");
 603   RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;
 604 
 605   guarantee(RegionSizeBytes == 0, "we should only set it once");
 606   RegionSizeBytes = (size_t)region_size;
 607   RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
 608   assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");
 609 
 610   guarantee(RegionSizeWordsMask == 0, "we should only set it once");
 611   RegionSizeWordsMask = RegionSizeWords - 1;
 612 
 613   guarantee(RegionSizeBytesMask == 0, "we should only set it once");
 614   RegionSizeBytesMask = RegionSizeBytes - 1;
 615 
 616   guarantee(HumongousThresholdWords == 0, "we should only set it once");
 617   HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
 618   assert (HumongousThresholdWords <= RegionSizeWords, "sanity");
 619 
 620   guarantee(HumongousThresholdBytes == 0, "we should only set it once");
 621   HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
 622   assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");
 623 
 624   // The rationale for trimming the TLAB sizes has to do with the raciness in
 625   // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah
 626   // about next free size, gets the answer for region #N, goes away for a while, then
 627   // tries to allocate in region #N, and fail because some other thread have claimed part
 628   // of the region #N, and then the freeset allocation code has to retire the region #N,
 629   // before moving the allocation to region #N+1.
 630   //
 631   // The worst case realizes when "answer" is "region size", which means it could
 632   // prematurely retire an entire region. Having smaller TLABs does not fix that
 633   // completely, but reduces the probability of too wasteful region retirement.
 634   // With current divisor, we will waste no more than 1/8 of region size in the worst
 635   // case. This also has a secondary effect on collection set selection: even under
 636   // the race, the regions would be at least 7/8 used, which allows relying on
 637   // "used" - "live" for cset selection. Otherwise, we can get the fragmented region
 638   // below the garbage threshold that would never be considered for collection.
 639   guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
 640   MaxTLABSizeBytes = MIN2(RegionSizeBytes / 8, HumongousThresholdBytes);
 641   assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");
 642 
 643   log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
 644   log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
 645   log_info(gc, init)("Region size byte shift: "SIZE_FORMAT, RegionSizeBytesShift);
 646   log_info(gc, init)("Humongous threshold in bytes: "SIZE_FORMAT, HumongousThresholdBytes);
 647   log_info(gc, init)("Max TLAB size in bytes: "SIZE_FORMAT, MaxTLABSizeBytes);
 648   log_info(gc, init)("Number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
 649 }
 650 
// Full GC support: the heap decides which region compaction moves into next.
CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return _heap->next_compaction_region(this);
}
 654 
// Full GC phase 1: compute forwarding addresses for all objects in this
// region relative to the compaction point.
void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}
 658 
 659 void ShenandoahHeapRegion::adjust_pointers() {
 660   // Check first is there is any work to do.
 661   if (used() == 0) {
 662     return;   // Nothing to do.
 663   }
 664 
 665   scan_and_adjust_pointers(this);
 666 }
 667 
// Full GC phase 3: slide objects in this region to their forwarded locations.
// Humongous regions are handled separately (they do not move piecewise).
void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}
 672 
// Commit the region's backing memory and the corresponding mark bitmap
// slices, updating the heap's committed-byte accounting. Commit failure is
// reported as Java OOM rather than a VM crash.
void ShenandoahHeapRegion::do_commit() {
  if (!os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!_heap->commit_bitmaps(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}
 682 
// Uncommit the region's backing memory and its mark bitmap slices, updating
// the heap's committed-byte accounting. Failures are reported as Java OOM,
// mirroring do_commit().
void ShenandoahHeapRegion::do_uncommit() {
  if (!os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!_heap->uncommit_bitmaps(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}