/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

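// Static region geometry (RegionSizeShift/RegionSizeBytes), set exactly once by
// setup_heap_region_size() during heap initialization, plus the shared lock used by the
// debug-only mprotect-based from-space verification below.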
Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);
size_t ShenandoahHeapRegion::RegionSizeShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t regionSizeWords, size_t index) :
#ifdef ASSERT
  _mem_protection_level(0),
#endif
  _heap(heap),
  _region_number(index),
  _live_data(0),
  reserved(MemRegion(start, regionSizeWords)),
  _humongous_start(false),
  _humongous_continuation(false),
  _humongous_obj_array(false),
  _recycled(true),
  _root(false),
  _new_top(NULL),
  _critical_pins(0) {

  ContiguousSpace::initialize(reserved, true, false);
}

size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  set_top(top() - size);
  return true;
}

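// Live data accounting. Liveness is stored in heap words in _live_data; it is only written
// by the VM thread, while readers go through load_acquire so concurrent readers observe a
// consistent value.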
void ShenandoahHeapRegion::clear_live_data() {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = 0;
}

void ShenandoahHeapRegion::set_recently_allocated(bool value) {
  _recycled = value;
}

bool ShenandoahHeapRegion::is_recently_allocated() const {
  return _recycled && used() > 0;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (jint) (s / HeapWordSize);
}

size_t ShenandoahHeapRegion::get_live_data_words() const {
  return (size_t)OrderAccess::load_acquire((volatile jint*)&_live_data);
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes() || is_humongous(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
         get_live_data_bytes(), used());
  size_t result = used() - get_live_data_bytes();
  return result;
}

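// Collection set membership is not stored in the region itself; it is delegated to a
// region-indexed table in ShenandoahHeap. In debug builds, entering or leaving the collection
// set also toggles the mprotect-based from-space verification below when the
// ShenandoahVerify*ToFromSpace flags are enabled.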
bool ShenandoahHeapRegion::in_collection_set() const {
  return _heap->region_in_collection_set(_region_number);
}

void ShenandoahHeapRegion::set_in_collection_set(bool b) {
  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");

  _heap->set_region_in_collection_set(_region_number, b);

#ifdef ASSERT
  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
    if (b) {
      memProtectionOn();
      assert(_mem_protection_level == 0, "need to be protected here");
    } else {
      assert(_mem_protection_level == 0, "need to be protected here");
      memProtectionOff();
    }
  }
#endif
}

#include <sys/mman.h>

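// Debug-only memory protection. _mem_protection_level is a counter: the region's memory is
// protected (read-only when verifying from-space writes, inaccessible when verifying
// from-space reads) whenever the counter is 0, and writable while it is above 0.
// memProtectionOn() re-arms protection by decrementing the counter back toward 0;
// memProtectionOff() disarms it by incrementing the counter away from 0.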
#ifdef ASSERT

void ShenandoahHeapRegion::memProtectionOn() {
  /*
  log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 1, "invariant");

  if (--_mem_protection_level == 0) {
    if (ShenandoahVerifyWritesToFromSpace) {
      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
    } else {
      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
    }
  }
}

void ShenandoahHeapRegion::memProtectionOff() {
  /*
  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 0, "invariant");
  if (_mem_protection_level++ == 0) {
    os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
  }
}

#endif

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("ShenandoahHeapRegion: "PTR_FORMAT"/"SIZE_FORMAT, p2i(this), _region_number);

  if (in_collection_set()) {
    st->print("C");
  }
  if (is_humongous_start()) {
    st->print("H");
  }
  if (is_humongous_continuation()) {
    st->print("h");
  }
  st->print(" ");

  st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
               get_live_data_bytes(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
}

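// Walk the objects in this region. Every object is preceded by a Brooks forwarding pointer
// word, so iteration starts at bottom() + BrooksPointer::word_size() and steps over that gap
// after each object. The walk bails out early when allow_cancel is set and concurrent GC has
// been cancelled.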
void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
  HeapWord* p = bottom() + BrooksPointer::word_size();
  while (p < top() && !(allow_cancel && _heap->cancelled_concgc())) {
    blk->do_object(oop(p));
#ifdef ASSERT
    if (ShenandoahVerifyReadsToFromSpace) {
      memProtectionOff();
      p += oop(p)->size() + BrooksPointer::word_size();
      memProtectionOn();
    } else {
      p += oop(p)->size() + BrooksPointer::word_size();
    }
#else
    p += oop(p)->size() + BrooksPointer::word_size();
#endif
  }
}

HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size + BrooksPointer::word_size();
    }
  }
  return NULL; // all done
}

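// Iterate the oops in this region. Humongous object arrays are special-cased: their element
// oops are iterated via the array header in the humongous start region, clipped to this
// region's [bottom, top) range, so each region only visits its own slice of the array.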
void ShenandoahHeapRegion::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  if (is_humongous_obj_array()) {
    oop_iterate_humongous(blk);
  } else {
    oop_iterate_objects(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_objects(ExtendedOopClosure* blk) {
  assert(! is_humongous_obj_array(), "no humongous obj array here");
  assert(! is_humongous_continuation(), "no humongous continuation here");
  // Note: it is ok to have a humongous start region here, e.g. in the case
  // of an int[]. We might want to visit its header.
  HeapWord* obj_addr = bottom() + BrooksPointer::word_size();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk) + BrooksPointer::word_size();
  }
}

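// For a humongous object array, locate the humongous start region by walking backwards from
// this region's index, then iterate the oops of that array which fall within this region.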
void ShenandoahHeapRegion::oop_iterate_humongous(ExtendedOopClosure* blk) {
  assert(is_humongous_obj_array(), "only humongous obj array here");
  // Find head.
  ShenandoahHeapRegionSet* regions = _heap->regions();
  uint idx = region_number();
  ShenandoahHeapRegion* r = regions->get(idx);
  while (! r->is_humongous_start()) {
    idx--;
    r = regions->get(idx);
  }
  assert(r->is_humongous_start(), "need humongous head here");
  objArrayOop array = objArrayOop(r->bottom() + BrooksPointer::word_size());
  array->oop_iterate(blk, MemRegion(bottom(), top()));
}

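// Fill the unused tail of the region with a dummy object so the region remains parseable.
// Like any other object here, the filler gets a Brooks forwarding pointer word in front of
// it, which is why one extra word is allocated and then initialized.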
void ShenandoahHeapRegion::fill_region() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();

  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    HeapWord* filler = allocate(BrooksPointer::word_size());
    HeapWord* obj = allocate(end() - top());
    sh->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}

void ShenandoahHeapRegion::set_humongous_start(bool start) {
  _humongous_start = start;
}

void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
  _humongous_continuation = continuation;
}

void ShenandoahHeapRegion::set_humongous_obj_array(bool obj_array) {
  _humongous_obj_array = obj_array;
}

bool ShenandoahHeapRegion::is_humongous() const {
  return _humongous_start || _humongous_continuation;
}

bool ShenandoahHeapRegion::is_humongous_start() const {
  return _humongous_start;
}

bool ShenandoahHeapRegion::is_humongous_continuation() const {
  return _humongous_continuation;
}

bool ShenandoahHeapRegion::is_humongous_obj_array() const {
  return _humongous_obj_array;
}

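// Reset the region so it can be reused for new allocations: reinitialize the space, clear
// liveness and the humongous/root flags, drop the region from the collection set, reset the
// complete-TAMS pointer, and clear this region's row in the connection matrix.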
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::initialize(reserved, true, false);
  clear_live_data();
  _humongous_start = false;
  _humongous_continuation = false;
  _humongous_obj_array = false;
  _recycled = true;
  _root = false;
  set_in_collection_set(false);
  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in this region is going to be new objects.
  _heap->set_complete_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");
  _heap->connection_matrix()->clear_region(region_number());
}

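// Find the start of the object (or filler) covering address p by linearly scanning object
// headers from the bottom of the region; addresses at or above top() map to top().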
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    assert(oop(last)->is_oop(),
           PTR_FORMAT" should be an object start", p2i(last));
    return last;
  }
}

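// Choose the region size once at startup. If -XX:ShenandoahHeapRegionSize is not set, the
// size is derived from the average of initial and maximum heap size divided by
// ShenandoahTargetNumRegions, clamped to [ShenandoahMinRegionSize, ShenandoahMaxRegionSize].
// Either way the result is raised to at least one large page when UseLargePages is set,
// rounded down to a power of two, and published in RegionSizeShift and RegionSizeBytes.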
void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;
  static const size_t MIN_NUM_REGIONS = 10;

  uintx region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than minimum region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "K should not be lower than TLAB size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "K should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should not be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
                       ShenandoahMinRegionSize);

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be larger than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should be lower than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now, set up the globals.
  guarantee(RegionSizeShift == 0, "we should only set it once");
  RegionSizeShift = region_size_log;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = (size_t)region_size;

  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
  log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
  log_info(gc, init)("Region size shift: "SIZE_FORMAT, RegionSizeShift);
  log_info(gc, init)("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
  log_info(gc, init)("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
}

CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return _heap->next_compaction_region(this);
}

void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void ShenandoahHeapRegion::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}

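// Critical pin accounting. pin()/unpin() atomically adjust the _critical_pins counter and
// must be called outside safepoints; is_pinned() reads the counter with acquire semantics
// and reports the region as pinned while the count is positive.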
void ShenandoahHeapRegion::pin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  assert(_critical_pins >= 0, "sanity");
  Atomic::inc(&_critical_pins);
}

void ShenandoahHeapRegion::unpin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  Atomic::dec(&_critical_pins);
  assert(_critical_pins >= 0, "sanity");
}

bool ShenandoahHeapRegion::is_pinned() {
  jint v = OrderAccess::load_acquire(&_critical_pins);
  assert(v >= 0, "sanity");
  return v > 0;
}