1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 #include "gc/shenandoah/brooksPointer.hpp"
  27 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.hpp"
  29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  30 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  31 #include "gc/shared/space.inline.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "runtime/java.hpp"
  35 #include "runtime/mutexLocker.hpp"
  36 #include "runtime/os.hpp"
  37 #include "runtime/safepoint.hpp"
  38 
// Region geometry globals, computed exactly once in setup_heap_region_size().
// Zero means "not yet initialized"; the setup code guarantee()s against re-init.
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;

// Global allocation sequence counter, handed out to regions as their
// first/last allocation timestamps.
// start with 1, reserve 0 for uninitialized value
uint64_t ShenandoahHeapRegion::AllocSeqNum = 1;
  51 
// Construct a region covering [start, start + size_words) at index 'index'.
// The region starts in one of the two "empty" states, depending on whether
// its backing memory is already committed.
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t size_words, size_t index, bool committed) :
  _heap(heap),
  _region_number(index),
  _live_data(0),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _shared_allocs(0),
  _reserved(MemRegion(start, size_words)),
  _root(false),
  _new_top(NULL),
  _first_alloc_seq_num(0),       // 0 == "never allocated into"
  _last_alloc_seq_num(0),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _empty_time(os::elapsedTime()),
  _critical_pins(0) {

  // NOTE(review): second arg appears to be clear_space and third maps
  // 'committed' onto mangle_space -- confirm against Space::initialize().
  ContiguousSpace::initialize(_reserved, true, committed);
}
  71 
// Index of this region within the heap's region array.
size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}
  75 
// Transition the region to _regular for mutator/GC allocation. Caller must
// hold the heap lock. Switch cases deliberately fall through: an uncommitted
// region is committed first, then an empty region records its first
// allocation sequence number and becomes regular. Already-regular and pinned
// regions are accepted as-is.
void ShenandoahHeapRegion::make_regular_allocation() {
  _heap->assert_heaplock_owned_by_current_thread();

  _last_alloc_seq_num = AllocSeqNum++;

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough
    case _empty_committed:
      assert(_first_alloc_seq_num == 0, "Sanity");
      _first_alloc_seq_num = _last_alloc_seq_num;
      _state = _regular;
      // fallthrough
    case _regular:
    case _pinned:
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_regular));
  }
}
  97 
// Transition to _regular, bypassing the normal allocation-path restrictions.
// Only full GC may do this, since it can legitimately revive regions from
// additional states (notably _cset). Switch cases deliberately fall through.
void ShenandoahHeapRegion::make_regular_bypass() {
  _heap->assert_heaplock_owned_by_current_thread();
  assert (_heap->is_full_gc_in_progress(), "only for full GC");

  _last_alloc_seq_num = AllocSeqNum++;

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough
    case _empty_committed:
      assert(_first_alloc_seq_num == 0, "Sanity");
      _first_alloc_seq_num = _last_alloc_seq_num;
      // fallthrough: cset regions keep their original first-alloc seq num
    case _cset:
      _state = _regular;
      // fallthrough
    case _regular:
    case _pinned:
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_regular));
  }
}
 121 
// Transition to _humongous_start, i.e. the first region backing a humongous
// object. Caller must hold the heap lock. Only empty regions are eligible;
// an uncommitted one is committed first (deliberate fallthrough).
void ShenandoahHeapRegion::make_humongous_start() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fallthrough
    case _empty_committed:
      assert(_first_alloc_seq_num == 0, "Sanity");
      _last_alloc_seq_num = AllocSeqNum++;
      _first_alloc_seq_num = _last_alloc_seq_num;
      _state = _humongous_start;
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_humongous_start));
  }
}
 139 
 140 void ShenandoahHeapRegion::make_humongous_cont() {
 141   _heap->assert_heaplock_owned_by_current_thread();
 142   switch (_state) {
 143     case _empty_uncommitted:
 144       do_commit();
 145     case _empty_committed:
 146       _state = _humongous_cont;
 147       assert(_first_alloc_seq_num == 0, "Sanity");
 148       _last_alloc_seq_num = AllocSeqNum++;
 149       _first_alloc_seq_num = _last_alloc_seq_num;
 150       return;
 151     default:
 152       fatal("Disallowed transition from %s to %s",
 153             region_state_to_string(_state),
 154             region_state_to_string(_humongous_cont));
 155   }
 156 }
 157 
 158 
// Pin the region against evacuation. Caller must hold the heap lock.
// A _regular region first flips to _pinned (deliberate fallthrough), then
// the pin count is bumped; repeated pins just increment the count.
// Humongous regions never move, so their pin count stays zero.
void ShenandoahHeapRegion::make_pinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      assert (_critical_pins == 0, "sanity");
      _state = _pinned;
      // fallthrough
    case _pinned:
      _critical_pins++;
      return;
    case _humongous_start:
    case _humongous_cont:
      // Humongous objects do not move, and thus pinning is no-op.
      assert (_critical_pins == 0, "sanity");
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_pinned));
  }
}
 179 
// Drop one pin; the region returns to _regular once the pin count reaches
// zero. Caller must hold the heap lock. Unpinning a _regular or humongous
// region is a tolerated no-op (their pin counts are always zero).
void ShenandoahHeapRegion::make_unpinned() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _pinned:
      assert (_critical_pins > 0, "sanity");
      _critical_pins--;
      if (_critical_pins == 0) {
        _state = _regular;
      }
      return;
    case _regular:
      assert (_critical_pins == 0, "sanity");
      return;
    case _humongous_start:
    case _humongous_cont:
      // Humongous objects do not move, and thus pinning is no-op.
      assert (_critical_pins == 0, "sanity");
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_regular));
  }
}
 204 
// Move a _regular region into the collection set. Idempotent: a region
// already in _cset is accepted (deliberate fallthrough). Caller must hold
// the heap lock.
void ShenandoahHeapRegion::make_cset() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _regular:
      _state = _cset;
      // fallthrough
    case _cset:
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_cset));
  }
}
 218 
// Mark the region as reclaimed garbage awaiting recycling. Legal from
// cset (after evacuation), humongous (dead humongous object), or regular
// (immediate reclaim). Caller must hold the heap lock.
void ShenandoahHeapRegion::make_trash() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      _state = _trash;
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_trash));
  }
}
 237 
// Move a _trash region back to _empty_committed, recording the moment it
// became empty. Idempotent for regions already empty-committed (deliberate
// fallthrough). Caller must hold the heap lock.
void ShenandoahHeapRegion::make_empty_committed() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _trash:
      _state = _empty_committed;
      _empty_time = os::elapsedTime();
      // fallthrough
    case _empty_committed:
      return;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_empty_committed));
  }
}
 252 
// Uncommit an _empty_committed region's backing memory. Returns true if
// memory was actually uncommitted, false if the region was already
// uncommitted. Caller must hold the heap lock.
bool ShenandoahHeapRegion::make_empty_uncommitted() {
  _heap->assert_heaplock_owned_by_current_thread();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      _state = _empty_uncommitted;
      return true;
    case _empty_uncommitted:
      return false;
    default:
      fatal("Disallowed transition from %s to %s",
            region_state_to_string(_state),
            region_state_to_string(_empty_uncommitted));
      return false; // unreachable; keeps the compiler happy after fatal()
  }
}
 269 
 270 bool ShenandoahHeapRegion::rollback_allocation(uint size) {
 271   set_top(top() - size);
 272   return true;
 273 }
 274 
// Reset the live data counter with a release-store fence, so concurrent
// readers (load_acquire in get_live_data_words) observe the cleared value.
void ShenandoahHeapRegion::clear_live_data() {
  OrderAccess::release_store_fence(&_live_data, 0);
}
 278 
 279 void ShenandoahHeapRegion::reset_alloc_stats() {
 280   _tlab_allocs = 0;
 281   _gclab_allocs = 0;
 282   _shared_allocs = 0;
 283 }
 284 
 285 void ShenandoahHeapRegion::reset_alloc_stats_to_shared() {
 286   _tlab_allocs = 0;
 287   _gclab_allocs = 0;
 288   _shared_allocs = used() >> LogHeapWordSize;
 289 }
 290 
 291 size_t ShenandoahHeapRegion::get_shared_allocs() const {
 292   return _shared_allocs * HeapWordSize;
 293 }
 294 
 295 size_t ShenandoahHeapRegion::get_tlab_allocs() const {
 296   return _tlab_allocs * HeapWordSize;
 297 }
 298 
 299 size_t ShenandoahHeapRegion::get_gclab_allocs() const {
 300   return _gclab_allocs * HeapWordSize;
 301 }
 302 
// Overwrite the live data counter with 's' bytes (stored internally in
// words). Plain store: only the VM thread may call this, so no ordering
// is needed here.
// NOTE(review): the narrowing cast to jint caps the representable live
// size -- confirm region sizes can never exceed that.
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (jint) (s >> LogHeapWordSize);
}
 307 
// Live data in words. Humongous liveness is recorded only on the start
// region: continuations defer to their start region, and any non-zero
// liveness means the entire used() extent of the region counts as live.
// Uses load_acquire to pair with the release-store in clear_live_data().
size_t ShenandoahHeapRegion::get_live_data_words() const {
  if (is_humongous()) {
    if (is_humongous_start()) {
      size_t live_data = (size_t)OrderAccess::load_acquire(&_live_data);
      return (live_data == 0) ? 0 : (used() >> LogHeapWordSize);
    } else {
      const ShenandoahHeapRegion* start = humongous_start_region();
      return start->get_live_data_words() == 0 ? 0 : (used() >> LogHeapWordSize);
    }
  } else {
    return (size_t)OrderAccess::load_acquire(&_live_data);
  }
}
 321 
 322 size_t ShenandoahHeapRegion::get_live_data_bytes() const {
 323   return get_live_data_words() * HeapWordSize;
 324 }
 325 
 326 bool ShenandoahHeapRegion::has_live() const {
 327   return get_live_data_words() != 0;
 328 }
 329 
 330 size_t ShenandoahHeapRegion::garbage() const {
 331   assert(used() >= get_live_data_bytes(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
 332          get_live_data_bytes(), used());
 333 
 334   size_t result = used() - get_live_data_bytes();
 335   return result;
 336 }
 337 
 338 bool ShenandoahHeapRegion::in_collection_set() const {
 339   return _heap->region_in_collection_set(_region_number);
 340 }
 341 
// Print a one-line, pipe-delimited summary of the region: address, index,
// two-character state tag, bottom/top/end pointers, usage percentages,
// allocation sequence numbers, root/pin status, and the TAMS pointer.
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|" PTR_FORMAT, p2i(this));
  st->print("|" SIZE_FORMAT_W(5), this->_region_number);

  // Two-character state tag.
  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU");
      break;
    case _empty_committed:
      st->print("|EC");
      break;
    case _regular:
      st->print("|R ");
      break;
    case _humongous_start:
      st->print("|H ");
      break;
    case _humongous_cont:
      st->print("|HC");
      break;
    case _cset:
      st->print("|CS");
      break;
    case _trash:
      st->print("|T ");
      break;
    case _pinned:
      st->print("|P ");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  // Percent-of-capacity columns: used, TLAB, GCLAB, shared, live.
  st->print("|U %3d%%", (int) ((double) used() * 100 / capacity()));
  st->print("|T %3d%%", (int) ((double) get_tlab_allocs() * 100 / capacity()));
  st->print("|G %3d%%", (int) ((double) get_gclab_allocs() * 100 / capacity()));
  st->print("|S %3d%%", (int) ((double) get_shared_allocs() * 100 / capacity()));
  st->print("|L %3d%%", (int) ((double) get_live_data_bytes() * 100 / capacity()));
  st->print("|FTS " UINT64_FORMAT_W(15), first_alloc_seq_num());
  st->print("|LTS " UINT64_FORMAT_W(15), last_alloc_seq_num());
  if (is_root()) {
    st->print("|R");
  } else {
    st->print("| ");
  }
  st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);

  st->print_cr("|TAMS " PTR_FORMAT "|",
               p2i(ShenandoahHeap::heap()->top_at_mark_start(_bottom)));
}
 393 
 394 void ShenandoahHeapRegion::oop_iterate(ExtendedOopClosure* blk) {
 395   if (!is_active()) return;
 396   if (is_humongous()) {
 397     oop_iterate_humongous(blk);
 398   } else {
 399     oop_iterate_objects(blk);
 400   }
 401 }
 402 
// Walk every object in a non-humongous region, applying 'blk' to each.
// Objects lie back-to-back between bottom() and top(), each preceded by
// a Brooks forwarding pointer word that must be skipped.
void ShenandoahHeapRegion::oop_iterate_objects(ExtendedOopClosure* blk) {
  assert(! is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom() + BrooksPointer::word_size();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk) + BrooksPointer::word_size();
  }
}
 413 
// Apply 'blk' to the slice of the humongous object that overlaps this
// region. The object header lives in the start region; iteration is
// clipped to this region's [bottom, top) via the MemRegion argument.
void ShenandoahHeapRegion::oop_iterate_humongous(ExtendedOopClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = oop(r->bottom() + BrooksPointer::word_size());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}
 422 
// Fill the unallocated tail of the region with a dummy object, keeping the
// heap parseable. Skipped when the leftover space cannot hold a minimal
// filler object plus its Brooks pointer.
void ShenandoahHeapRegion::fill_region() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    // The first allocation reserves the Brooks pointer slot for the filler
    // object; the returned address is intentionally unused.
    HeapWord* filler = allocate(BrooksPointer::word_size(), ShenandoahHeap::_alloc_shared);
    HeapWord* obj = allocate(end() - top(), ShenandoahHeap::_alloc_shared);
    sh->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}
 433 
 434 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
 435   assert(is_humongous(), "Must be a part of the humongous region");
 436   size_t reg_num = region_number();
 437   ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
 438   while (!r->is_humongous_start()) {
 439     assert(reg_num > 0, "Sanity");
 440     reg_num --;
 441     r = _heap->regions()->get(reg_num);
 442     assert(r->is_humongous(), "Must be a part of the humongous region");
 443   }
 444   assert(r->is_humongous_start(), "Must be");
 445   return r;
 446 }
 447 
// Reset the region for reuse: clear the space, live data, root flag,
// allocation stats and sequence numbers, then transition back to
// _empty_committed. Does NOT touch the connection matrix; see recycle().
void ShenandoahHeapRegion::recycle_no_matrix() {
  ContiguousSpace::clear(false);
  if (ZapUnusedHeapArea) {
    ContiguousSpace::mangle_unused_area_complete();
  }
  clear_live_data();
  _root = false;
  reset_alloc_stats();

  // Reset seq numbers
  _first_alloc_seq_num = 0;
  _last_alloc_seq_num = 0;

  // Reset TAMS pointer to ensure size-based iteration, everything
  // in that regions is going to be new objects.
  _heap->set_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the TAMS pointer if the bitmap is clear for that region.
  assert(_heap->is_bitmap_clear_range(bottom(), end()), "must be clear");

  make_empty_committed();
}
 469 
// Full recycle: reset the region itself, then wipe its slot in the
// connection matrix when the matrix is enabled.
void ShenandoahHeapRegion::recycle() {
  recycle_no_matrix();
  if (UseShenandoahMatrix) {
    _heap->connection_matrix()->clear_region(region_number());
  }
}
 476 
 477 HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
 478   assert(MemRegion(bottom(), end()).contains(p),
 479          "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
 480          p2i(p), p2i(bottom()), p2i(end()));
 481   if (p >= top()) {
 482     return top();
 483   } else {
 484     HeapWord* last = bottom() + BrooksPointer::word_size();
 485     HeapWord* cur = last;
 486     while (cur <= p) {
 487       last = cur;
 488       cur += oop(cur)->size() + BrooksPointer::word_size();
 489     }
 490     assert(oopDesc::is_oop(oop(last)),
 491            PTR_FORMAT" should be an object start", p2i(last));
 492     return last;
 493   }
 494 }
 495 
 496 void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
 497   // Absolute minimums we should not ever break.
 498   // Minimum region size should be to fit at least one page for mark bitmap for the region,
 499   // or at least 256K, whatever is larger.
 500   static const size_t MIN_REGION_SIZE = MAX2(256*K, os::vm_page_size() * MarkBitMap::heap_map_factor());
 501   static const size_t MIN_NUM_REGIONS = 10;
 502 
 503   if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
 504     FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
 505   }
 506 
 507   uintx region_size;
 508   if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
 509     if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
 510       err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
 511                       "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
 512                       initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
 513       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 514     }
 515     if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
 516       err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
 517                       ShenandoahMinRegionSize/K,  MIN_REGION_SIZE/K);
 518       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 519     }
 520     if (ShenandoahMinRegionSize < MinTLABSize) {
 521       err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size size (" SIZE_FORMAT "K).",
 522                       ShenandoahMinRegionSize/K,  MinTLABSize/K);
 523       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 524     }
 525     if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
 526       err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
 527                       ShenandoahMaxRegionSize/K,  MIN_REGION_SIZE/K);
 528       vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
 529     }
 530     if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
 531       err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
 532                       ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
 533       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
 534     }
 535     size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
 536     region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
 537                        ShenandoahMinRegionSize);
 538 
 539     // Now make sure that we don't go over or under our limits.
 540     region_size = MAX2(ShenandoahMinRegionSize, region_size);
 541     region_size = MIN2(ShenandoahMaxRegionSize, region_size);
 542 
 543   } else {
 544     if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
 545       err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
 546                               "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
 547                       initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
 548       vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
 549     }
 550     if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
 551       err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
 552                       ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
 553       vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
 554     }
 555     if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
 556       err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
 557                       ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
 558       vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
 559     }
 560     region_size = ShenandoahHeapRegionSize;
 561   }
 562 
 563   // Make sure region size is at least one large page, if enabled.
 564   // Otherwise, mem-protecting one region may falsely protect the adjacent
 565   // regions too.
 566   if (UseLargePages) {
 567     region_size = MAX2(region_size, os::large_page_size());
 568   }
 569 
 570   int region_size_log = log2_long((jlong) region_size);
 571   // Recalculate the region size to make sure it's a power of
 572   // 2. This means that region_size is the largest power of 2 that's
 573   // <= what we've calculated so far.
 574   region_size = ((uintx)1 << region_size_log);
 575 
 576   // Now, set up the globals.
 577   guarantee(RegionSizeBytesShift == 0, "we should only set it once");
 578   RegionSizeBytesShift = (size_t)region_size_log;
 579 
 580   guarantee(RegionSizeWordsShift == 0, "we should only set it once");
 581   RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;
 582 
 583   guarantee(RegionSizeBytes == 0, "we should only set it once");
 584   RegionSizeBytes = (size_t)region_size;
 585   RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
 586   assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");
 587 
 588   guarantee(RegionSizeWordsMask == 0, "we should only set it once");
 589   RegionSizeWordsMask = RegionSizeWords - 1;
 590 
 591   guarantee(RegionSizeBytesMask == 0, "we should only set it once");
 592   RegionSizeBytesMask = RegionSizeBytes - 1;
 593 
 594   guarantee(HumongousThresholdWords == 0, "we should only set it once");
 595   HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
 596   assert (HumongousThresholdWords <= RegionSizeWords, "sanity");
 597 
 598   guarantee(HumongousThresholdBytes == 0, "we should only set it once");
 599   HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
 600   assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");
 601 
 602   // The rationale for trimming the TLAB sizes has to do with the raciness in
 603   // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah
 604   // about next free size, gets the answer for region #N, goes away for a while, then
 605   // tries to allocate in region #N, and fail because some other thread have claimed part
 606   // of the region #N, and then the freeset allocation code has to retire the region #N,
 607   // before moving the allocation to region #N+1.
 608   //
 609   // The worst case realizes when "answer" is "region size", which means it could
 610   // prematurely retire an entire region. Having smaller TLABs does not fix that
 611   // completely, but reduces the probability of too wasteful region retirement.
 612   // With current divisor, we will waste no more than 1/8 of region size in the worst
 613   // case. This also has a secondary effect on collection set selection: even under
 614   // the race, the regions would be at least 7/8 used, which allows relying on
 615   // "used" - "live" for cset selection. Otherwise, we can get the fragmented region
 616   // below the garbage threshold that would never be considered for collection.
 617   guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
 618   MaxTLABSizeBytes = MIN2(RegionSizeBytes / 8, HumongousThresholdBytes);
 619   assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");
 620 
 621   log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
 622   log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
 623   log_info(gc, init)("Region size byte shift: "SIZE_FORMAT, RegionSizeBytesShift);
 624   log_info(gc, init)("Humongous threshold in bytes: "SIZE_FORMAT, HumongousThresholdBytes);
 625   log_info(gc, init)("Max TLAB size in bytes: "SIZE_FORMAT, MaxTLABSizeBytes);
 626   log_info(gc, init)("Number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
 627 }
 628 
// Mark-compact support: ask the heap which region serves as the next
// compaction target after this one.
CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return _heap->next_compaction_region(this);
}
 632 
// Mark-compact support: compute forwarding for this region's objects,
// advancing the shared compaction point 'cp'.
void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}
 636 
 637 void ShenandoahHeapRegion::adjust_pointers() {
 638   // Check first is there is any work to do.
 639   if (used() == 0) {
 640     return;   // Nothing to do.
 641   }
 642 
 643   scan_and_adjust_pointers(this);
 644 }
 645 
// Mark-compact support: slide this region's live objects to their new
// locations. Humongous regions never go through this path.
void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}
 650 
// Commit backing memory for the region and its marking bitmap slice, then
// bump the heap's committed accounting. Commit failures are surfaced as
// Java OOM rather than VM aborts.
void ShenandoahHeapRegion::do_commit() {
  if (!os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!_heap->commit_bitmaps(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  _heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}
 660 
// Uncommit the region's backing memory and its marking bitmap slice, then
// drop the heap's committed accounting. Failures are surfaced as Java OOM
// rather than VM aborts.
void ShenandoahHeapRegion::do_uncommit() {
  if (!os::uncommit_memory((char *) _reserved.start(), _reserved.byte_size())) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!_heap->uncommit_bitmaps(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  _heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}