1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "memory/allocation.hpp"
  25 #include "gc/shenandoah/brooksPointer.hpp"
  26 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  27 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  28 #include "gc/shared/space.inline.hpp"
  29 #include "memory/universe.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/mutexLocker.hpp"
  32 #include "runtime/os.hpp"
  33 #include "runtime/safepoint.hpp"
  34 
// Lock serializing the debug-only mprotect level transitions on regions
// (see memProtectionOn/memProtectionOff); never safepoint-checked.
Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);
// Region size globals; zero until set exactly once by setup_heap_region_size().
size_t ShenandoahHeapRegion::RegionSizeShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
  38 
// One-time set-up of this region to cover [start, start + regionSizeWords),
// recording its index within the heap's region array. Returns JNI_OK.
jint ShenandoahHeapRegion::initialize_heap_region(HeapWord* start,
                                                  size_t regionSizeWords, size_t index) {

  reserved = MemRegion(start, regionSizeWords);
  // Booleans are clear_space / mangle_space -- presumably matching
  // ContiguousSpace::initialize(MemRegion, bool, bool); verify in space.hpp.
  ContiguousSpace::initialize(reserved, true, false);
  _live_data = 0;
  _is_in_collection_set = false;
  _region_number = index;
#ifdef ASSERT
  _mem_protection_level = 1; // Off, level 1.
#endif
  // Nothing marked or allocated yet: all mark-start markers point at bottom.
  _top_at_mark_start = bottom();
  _top_at_prev_mark_start = bottom();
  _top_prev_mark_bitmap = bottom();
  return JNI_OK;
}
  55 
// Index of this region within the heap's region array.
size_t ShenandoahHeapRegion::region_number() const {
  return _region_number;
}
  59 
  60 bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  61   set_top(top() - size);
  62   return true;
  63 }
  64 
// Resets the accumulated live-data counter to zero.
// NOTE(review): plain store, while get_live_data() reads with load_acquire;
// writes are confined to the VM thread (see assert), so this looks
// intentional -- confirm whether lock-free readers need a release store.
void ShenandoahHeapRegion::clear_live_data() {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = 0;
}
  69 
// Sets the live-data counter (in bytes, per the used()/garbage() math).
// Plain store; only the VM thread writes (see clear_live_data() note).
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = s;
}
  74 
// Returns the live-data counter with acquire semantics, so concurrent
// readers observe a value published by the VM thread.
size_t ShenandoahHeapRegion::get_live_data() const {
  // The cast below reads _live_data as a julong; only safe if the widths match.
  assert (sizeof(julong) == sizeof(size_t), "do not read excessively");
  return (size_t)OrderAccess::load_acquire((volatile julong*)&_live_data);
}
  79 
  80 size_t ShenandoahHeapRegion::garbage() const {
  81   assert(used() >= get_live_data() || is_humongous(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
  82          get_live_data(), used());
  83   size_t result = used() - get_live_data();
  84   return result;
  85 }
  86 
// Whether this region is currently a member of the collection set.
bool ShenandoahHeapRegion::is_in_collection_set() const {
  return _is_in_collection_set;
}
  90 
  91 #include <sys/mman.h>
  92 
  93 #ifdef ASSERT
  94 
// Debug-only: re-arms mprotect-based protection of this region's memory.
// _mem_protection_level counts outstanding "unprotect" requests, with a
// baseline of 1 meaning unprotected. When the decrement reaches 0, the
// region is actually protected: read-only when verifying from-space
// writes, fully inaccessible when verifying from-space reads.
void ShenandoahHeapRegion::memProtectionOn() {
  /*
  log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 1, "invariant");

  if (--_mem_protection_level == 0) {
    if (ShenandoahVerifyWritesToFromSpace) {
      assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
    } else {
      assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
      os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
    }
  }
}
 113 
// Debug-only: lifts mprotect-based protection of this region's memory.
// Counterpart of memProtectionOn(): the first increment away from 0
// restores read/write access; further increments just nest.
void ShenandoahHeapRegion::memProtectionOff() {
  /*
  tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
  print(tty);
  */
  MutexLockerEx ml(&_mem_protect_lock, true);
  assert(_mem_protection_level >= 0, "invariant");
  if (_mem_protection_level++ == 0) {
    os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
  }
}
 125 
 126 #endif
 127 
// Adds this region to / removes it from the collection set. Humongous
// regions are never collected. Entering the cset also registers the
// region in the heap's fast in-cset lookup table.
void ShenandoahHeapRegion::set_is_in_collection_set(bool b) {
  assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");

  _is_in_collection_set = b;

  if (b) {
    // tty->print_cr("registering region in fast-cset");
    // print();
    ShenandoahHeap::heap()->register_region_with_in_cset_fast_test(this);
  }

#ifdef ASSERT
  // Under from-space verification, cset membership drives the region's
  // memory protection: protected while in the cset, unprotected after
  // leaving it. Level 0 means "protected" (see memProtectionOn()).
  if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
    if (b) {
      memProtectionOn();
      assert(_mem_protection_level == 0, "need to be protected here");
    } else {
      assert(_mem_protection_level == 0, "need to be protected here");
      memProtectionOff();
    }
  }
#endif
}
 151 
// Byte offset of the _is_in_collection_set flag, for generated-code
// fast paths that test cset membership without a call.
ByteSize ShenandoahHeapRegion::is_in_collection_set_offset() {
  return byte_offset_of(ShenandoahHeapRegion, _is_in_collection_set);
}
 155 
 156 void ShenandoahHeapRegion::print_on(outputStream* st) const {
 157   st->print("ShenandoahHeapRegion: "PTR_FORMAT"/"SIZE_FORMAT, p2i(this), _region_number);
 158 
 159   if (is_in_collection_set())
 160     st->print("C");
 161   if (is_humongous_start()) {
 162     st->print("H");
 163   }
 164   if (is_humongous_continuation()) {
 165     st->print("h");
 166   }
 167   //else
 168     st->print(" ");
 169 
 170   st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
 171                get_live_data(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
 172 }
 173 
 174 
 175 class SkipUnreachableObjectToOopClosure: public ObjectClosure {
 176   ExtendedOopClosure* _cl;
 177   bool _skip_unreachable_objects;
 178   ShenandoahHeap* _heap;
 179 
 180 public:
 181   SkipUnreachableObjectToOopClosure(ExtendedOopClosure* cl, bool skip_unreachable_objects) :
 182     _cl(cl), _skip_unreachable_objects(skip_unreachable_objects), _heap(ShenandoahHeap::heap()) {}
 183 
 184   void do_object(oop obj) {
 185 
 186     if ((! _skip_unreachable_objects) || _heap->is_marked_current(obj)) {
 187 #ifdef ASSERT
 188       if (_skip_unreachable_objects) {
 189         assert(_heap->is_marked_current(obj), "obj must be live");
 190       }
 191 #endif
 192       obj->oop_iterate(_cl);
 193     }
 194 
 195   }
 196 };
 197 
// Applies 'blk' to every object between bottom() and top(), stepping over
// each object's brooks pointer word. When 'allow_cancel' is set, the walk
// stops early once a concurrent-GC cancellation is observed.
void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
  // First object starts one brooks-pointer word past bottom().
  HeapWord* p = bottom() + BrooksPointer::word_size();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  while (p < top() && !(allow_cancel && heap->cancelled_concgc())) {
    blk->do_object(oop(p));
#ifdef ASSERT
    if (ShenandoahVerifyReadsToFromSpace) {
      // Reading the object's size would trap while the region is
      // mprotected; lift protection around the read, then re-arm it.
      memProtectionOff();
      p += oop(p)->size() + BrooksPointer::word_size();
      memProtectionOn();
    } else {
      p += oop(p)->size() + BrooksPointer::word_size();
    }
#else
      p += oop(p)->size() + BrooksPointer::word_size();
#endif
  }
}
 216 
 217 HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
 218   HeapWord * limit = concurrent_iteration_safe_limit();
 219   assert(limit <= top(), "sanity check");
 220   for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
 221     size_t size = blk->do_object_careful(oop(p));
 222     if (size == 0) {
 223       return p;  // failed at p
 224     } else {
 225       p += size + BrooksPointer::word_size();
 226     }
 227   }
 228   return NULL; // all done
 229 }
 230 
 231 void ShenandoahHeapRegion::oop_iterate_skip_unreachable(ExtendedOopClosure* cl, bool skip_unreachable_objects) {
 232   SkipUnreachableObjectToOopClosure cl2(cl, skip_unreachable_objects);
 233   object_iterate_interruptible(&cl2, false);
 234 }
 235 
 236 void ShenandoahHeapRegion::fill_region() {
 237   ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
 238 
 239   if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
 240     HeapWord* filler = allocate(BrooksPointer::word_size());
 241     HeapWord* obj = allocate(end() - top());
 242     sh->fill_with_object(obj, end() - obj);
 243     BrooksPointer::initialize(oop(obj));
 244   }
 245 }
 246 
// Marks/unmarks this region as the first region of a humongous object.
void ShenandoahHeapRegion::set_humongous_start(bool start) {
  _humongous_start = start;
}
 250 
// Marks/unmarks this region as a continuation region of a humongous object.
void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
  _humongous_continuation = continuation;
}
 254 
// True if this region belongs to a humongous object (start or continuation).
bool ShenandoahHeapRegion::is_humongous() const {
  return _humongous_start || _humongous_continuation;
}
 258 
// True if this region is the first region of a humongous object.
bool ShenandoahHeapRegion::is_humongous_start() const {
  return _humongous_start;
}
 262 
// True if this region is a continuation region of a humongous object.
bool ShenandoahHeapRegion::is_humongous_continuation() const {
  return _humongous_continuation;
}
 266 
// Shared part of recycle()/reset(): re-initializes the underlying space
// (resetting top to bottom) and clears per-cycle region state.
void ShenandoahHeapRegion::do_reset() {
  ContiguousSpace::initialize(reserved, true, false);
  clear_live_data();
  _humongous_start = false;
  _humongous_continuation = false;
  // _top_at_mark_start = bottom();
  _top_at_prev_mark_start = bottom();
}
 275 
// Makes the region available for allocation again. Uses the cset setter,
// which also handles the debug-time memory-protection toggling.
void ShenandoahHeapRegion::recycle() {
  do_reset();
  set_is_in_collection_set(false);
}
 280 
// Full reset of the region; requires the region to be unprotected
// (debug builds: _mem_protection_level baseline is 1).
// NOTE(review): clears _is_in_collection_set directly, bypassing
// set_is_in_collection_set() and its protection toggling -- presumably
// intentional for this path; confirm against callers.
void ShenandoahHeapRegion::reset() {
  assert(_mem_protection_level == 1, "needs to be unprotected here");
  do_reset();
  _is_in_collection_set = false;
}
 286 
// Returns the start of the object containing address 'p', or top() when
// 'p' lies in the unallocated tail. Linear scan from the first object;
// each step skips an object plus the brooks-pointer word that precedes
// the next one. O(number of objects below p).
HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    // 'last' trails one object behind 'cur'; when 'cur' passes p, 'last'
    // is the start of the object containing p.
    HeapWord* last = bottom() + BrooksPointer::word_size();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size() + BrooksPointer::word_size();
    }
    assert(oop(last)->is_oop(),
           PTR_FORMAT" should be an object start", p2i(last));
    return last;
  }
}
 305 
// Computes and publishes the global region size (RegionSizeBytes and
// RegionSizeShift) either ergonomically or from an explicit
// -XX:ShenandoahHeapRegionSize, validating the user-supplied flags.
// Must be called exactly once during heap initialization.
void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size;
  if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
    // Ergonomic path: sanity-check the min/max region size flags first.
    if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option");
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option");
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option");
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize");
    }
    // Aim for the target region count on the average of initial and max heap.
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    // NOTE(review): this MAX2 with ShenandoahMinRegionSize is repeated by
    // the clamping immediately below; one of the two is redundant.
    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
                       ShenandoahMinRegionSize);

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    // Explicit path: validate the user's fixed region size.
    if (ShenandoahHeapRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option");
    }
    if (ShenandoahHeapRegionSize < MIN_REGION_SIZE) {
      vm_exit_during_initialization("Invalid -XX:ShenandoahHeapRegionSize option");
    }
    region_size = ShenandoahHeapRegionSize;
  }

  // Make sure region size is at least one large page, if enabled.
  // Otherwise, mem-protecting one region may falsely protect the adjacent
  // regions too.
  if (UseLargePages) {
    region_size = MAX2(region_size, os::large_page_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now, set up the globals.
  guarantee(RegionSizeShift == 0, "we should only set it once");
  RegionSizeShift = region_size_log;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = (size_t)region_size;

  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
  log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
  log_info(gc, init)("Region size shift: "SIZE_FORMAT, RegionSizeShift);
  log_info(gc, init)("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
  log_info(gc, init)("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
}
 365 
// Next space to compact into; delegated to the heap's region ordering.
CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
  return ShenandoahHeap::heap()->next_compaction_region(this);
}
 369 
// Mark-compact phase 2: computes forwarding addresses for live objects.
void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}
 373 
 374 void ShenandoahHeapRegion::adjust_pointers() {
 375   // Check first is there is any work to do.
 376   if (used() == 0) {
 377     return;   // Nothing to do.
 378   }
 379 
 380   scan_and_adjust_pointers(this);
 381 }
 382 
// Mark-compact phase 4: moves live objects to their forwarding addresses.
// Humongous regions are handled separately and must not come through here.
void ShenandoahHeapRegion::compact() {
  assert(!is_humongous(), "Shouldn't be compacting humongous regions");
  scan_and_compact(this);
}
 387 
// Records the current top() as the top-at-mark-start (TAMS) and
// publishes it to the heap's per-region TAMS table.
void ShenandoahHeapRegion::init_top_at_mark_start() {
  _top_at_mark_start = top();
  ShenandoahHeap::heap()->set_top_at_mark_start(bottom(), top());
}
 392 
// Sets top-at-mark-start to the given address and publishes it to the
// heap's per-region TAMS table.
void ShenandoahHeapRegion::set_top_at_mark_start(HeapWord* top) {
  _top_at_mark_start = top;
  ShenandoahHeap::heap()->set_top_at_mark_start(bottom(), top);
}
 397 
// Forgets the previous cycle's TAMS by resetting it to the region bottom.
void ShenandoahHeapRegion::reset_top_at_prev_mark_start() {
  _top_at_prev_mark_start = bottom();
}
 401 
// Top-at-mark-start of the current marking cycle.
HeapWord* ShenandoahHeapRegion::top_at_mark_start() {
  return _top_at_mark_start;
}
 405 
// Top-at-mark-start of the previous marking cycle.
HeapWord* ShenandoahHeapRegion::top_at_prev_mark_start() {
  return _top_at_prev_mark_start;
}
 409 
// Upper bound of the previous cycle's mark bitmap coverage for this region.
HeapWord* ShenandoahHeapRegion::top_prev_mark_bitmap() {
  return _top_prev_mark_bitmap;
}
 413 
// True if 'addr' lies at or above the previous cycle's TAMS, i.e. the
// object there was allocated after the previous marking started.
bool ShenandoahHeapRegion::allocated_after_prev_mark_start(HeapWord* addr) const {
  return addr >= _top_at_prev_mark_start;
}
 417 
 418 void ShenandoahHeapRegion::swap_top_at_mark_start() {
 419   HeapWord* tmp = _top_at_prev_mark_start;
 420   _top_at_prev_mark_start = _top_at_mark_start;
 421   _top_at_mark_start = tmp;
 422   ShenandoahHeap::heap()->set_top_at_mark_start(bottom(), tmp);
 423 }
 424 
// Records the upper bound of the previous cycle's mark bitmap coverage.
void ShenandoahHeapRegion::set_top_prev_mark_bitmap(HeapWord* top) {
  _top_prev_mark_bitmap = top;
}
 428 
// Registers a JNI-critical pin on this region; pinned regions must not be
// evacuated. Atomic increment: multiple Java threads may pin concurrently.
void ShenandoahHeapRegion::pin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  assert(_critical_pins >= 0, "sanity");
  Atomic::inc(&_critical_pins);
}
 434 
// Releases a JNI-critical pin previously taken with pin().
void ShenandoahHeapRegion::unpin() {
  assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
  Atomic::dec(&_critical_pins);
  // The counter must never go negative: every unpin pairs with a pin.
  assert(_critical_pins >= 0, "sanity");
}
 440 
// True if any JNI-critical pins are outstanding. Only meaningful at a
// safepoint, when the pin count cannot change under us.
bool ShenandoahHeapRegion::is_pinned() {
  assert(_critical_pins >= 0, "sanity");
  assert(SafepointSynchronize::is_at_safepoint(), "only at safepoints");
  return _critical_pins > 0;
}