/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);
size_t ShenandoahHeapRegion::RegionSizeShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;

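// A ShenandoahHeapRegion is a ContiguousSpace covering 'regionSizeWords' words
// starting at 'start'. Freshly constructed regions are empty and marked recycled.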
ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                           size_t regionSizeWords, size_t index) :
#ifdef ASSERT
  _mem_protection_level(0),
#endif
  _heap(heap),
  _region_number(index),
  _live_data(0),
  reserved(MemRegion(start, regionSizeWords)),
  _humongous_start(false),
  _humongous_continuation(false),
  _recycled(true),
  _root(false),
  _new_top(NULL),
  _critical_pins(0) {

  ContiguousSpace::initialize(reserved, true, false);
}

size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}

bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  set_top(top() - size);
  return true;
}

void ShenandoahHeapRegion::clear_live_data() {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = 0;
}

void ShenandoahHeapRegion::set_recently_allocated(bool value) {
  _recycled = value;
}

bool ShenandoahHeapRegion::is_recently_allocated() const {
  return _recycled && used() > 0;
}

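// Live data is accounted in heap words and stored as a jint: it is written by the
// VM thread only (see the asserts) and read concurrently with load_acquire.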
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (jint) (s / HeapWordSize);
}

size_t ShenandoahHeapRegion::get_live_data_words() const {
  return (size_t)OrderAccess::load_acquire((volatile jint*)&_live_data);
}

size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}

size_t ShenandoahHeapRegion::garbage() const {
  assert(used() >= get_live_data_bytes() || is_humongous(), "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT,
         get_live_data_bytes(), used());
  size_t result = used() - get_live_data_bytes();
  return result;
}

bool ShenandoahHeapRegion::in_collection_set() const {
  return _heap->region_in_collection_set(_region_number);
}

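// Collection set membership is tracked on the heap side, keyed by region number.
// In debug builds, with the from-space verification flags enabled, moving a region
// into or out of the collection set also toggles its memory protection.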
void ShenandoahHeapRegion::set_in_collection_set(bool b) {
  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");

  _heap->set_region_in_collection_set(_region_number, b);

#ifdef ASSERT
  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
    if (b) {
      memProtectionOn();
      assert(_mem_protection_level == 0, "need to be protected here");
    } else {
      assert(_mem_protection_level == 0, "need to be protected here");
      memProtectionOff();
    }
  }
#endif
}

#include <sys/mman.h>

#ifdef ASSERT

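// _mem_protection_level counts outstanding "protection off" requests; zero means the
// region is fully protected. memProtectionOff() makes the region read-write on the
// first request and increments the counter; memProtectionOn() decrements it and
// re-arms the protection (read-only or no-access, depending on the verification
// mode) once it drops back to zero. Both nest and are serialized by _mem_protect_lock.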
void ShenandoahHeapRegion::memProtectionOn() {
  /*
  log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 1, "invariant");

  if (--_mem_protection_level == 0) {
    if (ShenandoahVerifyWritesToFromSpace) {
      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
      os::protect_memory((char*) bottom(), (end() - bottom()) * HeapWordSize, os::MEM_PROT_READ);
    } else {
      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
      os::protect_memory((char*) bottom(), (end() - bottom()) * HeapWordSize, os::MEM_PROT_NONE);
    }
  }
}

void ShenandoahHeapRegion::memProtectionOff() {
  /*
  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 0, "invariant");
  if (_mem_protection_level++ == 0) {
    os::protect_memory((char*) bottom(), (end() - bottom()) * HeapWordSize, os::MEM_PROT_RW);
  }
}

#endif

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("ShenandoahHeapRegion: " PTR_FORMAT "/" SIZE_FORMAT, p2i(this), _region_number);

  if (in_collection_set()) {
    st->print("C");
  }
  if (is_humongous_start()) {
    st->print("H");
  }
  if (is_humongous_continuation()) {
    st->print("h");
  }
  st->print(" ");

  st->print_cr("live = " SIZE_FORMAT " garbage = " SIZE_FORMAT " bottom = " PTR_FORMAT " end = " PTR_FORMAT " top = " PTR_FORMAT,
               get_live_data_bytes(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
}

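// Every object in a region is preceded by its Brooks forwarding pointer word, so
// iteration starts at bottom() + BrooksPointer::word_size() and advances by the
// object size plus one forwarding-pointer word.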
void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
  HeapWord* p = bottom() + BrooksPointer::word_size();
  while (p < top() && !(allow_cancel && _heap->cancelled_concgc())) {
    blk->do_object(oop(p));
#ifdef ASSERT
    if (ShenandoahVerifyReadsToFromSpace) {
      memProtectionOff();
      p += oop(p)->size() + BrooksPointer::word_size();
      memProtectionOn();
    } else {
      p += oop(p)->size() + BrooksPointer::word_size();
    }
#else
    p += oop(p)->size() + BrooksPointer::word_size();
#endif
  }
}

HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size + BrooksPointer::word_size();
    }
  }
  return NULL; // all done
}

void ShenandoahHeapRegion::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom() + BrooksPointer::word_size();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk) + BrooksPointer::word_size();
  }
}

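// Pad the unused tail of the region with a single filler object. One extra Brooks
// pointer slot is allocated in front of the filler so it is laid out like any other
// object in the region.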
void ShenandoahHeapRegion::fill_region() {
  ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();

  if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
    HeapWord* filler = allocate(BrooksPointer::word_size());
    HeapWord* obj = allocate(end() - top());
    sh->fill_with_object(obj, end() - obj);
    BrooksPointer::initialize(oop(obj));
  }
}

void ShenandoahHeapRegion::set_humongous_start(bool start) {
  _humongous_start = start;
}

void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
  _humongous_continuation = continuation;
}

bool ShenandoahHeapRegion::is_humongous() const {
  return _humongous_start || _humongous_continuation;
}

bool ShenandoahHeapRegion::is_humongous_start() const {
  return _humongous_start;
}

bool ShenandoahHeapRegion::is_humongous_continuation() const {
  return _humongous_continuation;
}

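// Reset the region for reuse: re-initialize the underlying space, drop the live data
// count and the humongous/root flags, take the region out of the collection set, and
// clear its row in the connection matrix.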
void ShenandoahHeapRegion::recycle() {
  ContiguousSpace::initialize(reserved, true, false);
  clear_live_data();
  _humongous_start = false;
  _humongous_continuation = false;
  _recycled = true;
  _root = false;
  set_in_collection_set(false);
  // Reset the C-TAMS pointer to ensure size-based iteration: everything
  // in this region is going to be new objects.
  _heap->set_complete_top_at_mark_start(bottom(), bottom());
  // We can only safely reset the C-TAMS pointer if the bitmap is clear for this region.
  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");
  _heap->connection_matrix()->clear_region(region_number());
}

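// Find the start of the object covering 'p' by walking forward from the first object
// in the region; addresses at or above top() map to top() itself.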
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    assert(oop(last)->is_oop(),
           PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

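// Pick the global region size once, at startup. By default the size is derived from
// the average of the initial and maximum heap sizes divided by ShenandoahTargetNumRegions,
// clamped to [ShenandoahMinRegionSize, ShenandoahMaxRegionSize]; an explicit
// -XX:ShenandoahHeapRegionSize is validated against the same bounds instead. The result
// is then bumped to at least one large page (if enabled) and rounded down to a power of two.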
void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;
  static const size_t MIN_NUM_REGIONS = 10;

  uintx region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("Min region size (" SIZE_FORMAT "K) should not be lower than the smallest allowed region size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("Min region size (" SIZE_FORMAT "K) should not be lower than TLAB size (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K,  MinTLABSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("Max region size (" SIZE_FORMAT "K) should not be lower than the smallest allowed region size (" SIZE_FORMAT "K).",
                      ShenandoahMaxRegionSize/K,  MIN_REGION_SIZE/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "K) should not be larger than maximum (" SIZE_FORMAT "K).",
                      ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
                       ShenandoahMinRegionSize);

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "K).",
                      initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahHeapRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should not be lower than min region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMinRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    if (ShenandoahHeapRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "K) should not be larger than max region size (" SIZE_FORMAT "K).",
                      ShenandoahHeapRegionSize/K, ShenandoahMaxRegionSize/K);
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option", message);
    }
    region_size = ShenandoahHeapRegionSize;
  }


  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);
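  // For example (hypothetical numbers): a computed region_size of 3M has
  // region_size_log == 21 and is therefore rounded down to 1 << 21 == 2M.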

  // Now, set up the globals.
  guarantee(RegionSizeShift == 0, "we should only set it once");
  RegionSizeShift = region_size_log;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = (size_t)region_size;

  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
  log_info(gc, init)("Region size in bytes: " SIZE_FORMAT, RegionSizeBytes);
  log_info(gc, init)("Region size shift: " SIZE_FORMAT, RegionSizeShift);
  log_info(gc, init)("Initial number of regions: " SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
  log_info(gc, init)("Maximum number of regions: " SIZE_FORMAT, max_heap_size / RegionSizeBytes);
}

CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return _heap->next_compaction_region(this);
}

void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void ShenandoahHeapRegion::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}

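// _critical_pins counts outstanding pin requests on this region; is_pinned() reports
// whether any are active. Pinning and unpinning are only expected outside safepoints,
// as the asserts below document.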
void ShenandoahHeapRegion::pin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  assert(_critical_pins >= 0, "sanity");
  Atomic::inc(&_critical_pins);
}

void ShenandoahHeapRegion::unpin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  Atomic::dec(&_critical_pins);
  assert(_critical_pins >= 0, "sanity");
}

bool ShenandoahHeapRegion::is_pinned() {
  jint v = OrderAccess::load_acquire(&_critical_pins);
  assert(v >= 0, "sanity");
  return v > 0;
}