< prev index next >

src/share/vm/gc/shenandoah/shenandoahHeapRegion.cpp

Print this page
rev 12551 : Refactor/consolidate/cleanup


  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "memory/allocation.hpp"
  25 #include "gc/shenandoah/brooksPointer.hpp"
  26 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  27 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  28 #include "gc/shared/space.inline.hpp"
  29 #include "memory/universe.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/mutexLocker.hpp"
  32 #include "runtime/os.hpp"
  33 #include "runtime/safepoint.hpp"
  34 
  35 Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);
  36 size_t ShenandoahHeapRegion::RegionSizeShift = 0;
  37 size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
  38 
// Late, in-place initialization of a heap region: records the reserved
// MemRegion, initializes the underlying ContiguousSpace (clear_space = true,
// mangle = false) and resets all per-region GC bookkeeping.
// Always returns JNI_OK; the jint return mirrors the heap-initialization APIs.
  39 jint ShenandoahHeapRegion::initialize_heap_region(HeapWord* start,
  40                                                   size_t regionSizeWords, size_t index) {
  41 
  42   reserved = MemRegion(start, regionSizeWords);
  43   ContiguousSpace::initialize(reserved, true, false);
  44   _live_data = 0;
  45   _is_in_collection_set = false;
  46   _region_number = index;
  47 #ifdef ASSERT
  48   _mem_protection_level = 1; // Off, level 1.
  49 #endif
// All top-at-mark-start markers begin at bottom(): nothing is allocated yet.
  50   _top_at_mark_start = bottom();
  51   _top_at_prev_mark_start = bottom();
  52   _top_prev_mark_bitmap = bottom();
  53   return JNI_OK;
  54 }
  55 
// Index of this region within the heap's region array.
  56 size_t ShenandoahHeapRegion::region_number() const {
  57   return _region_number;
  58 }
  59 
// Undoes the most recent allocation of 'size' words by moving top() back.
// NOTE(review): there is no check that 'size' matches the last allocation;
// callers must guarantee that. The return value is always true.
  60 bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  61   set_top(top() - size);
  62   return true;
  63 }
  64 
// Resets the live-data counter. VM-thread only (asserted below).
  65 void ShenandoahHeapRegion::clear_live_data() {
  66   assert(Thread::current()->is_VM_thread(), "by VM thread");
  67   _live_data = 0;
  68 }
  69 
// Sets the live-data counter. VM-thread only.
// NOTE(review): readers use load_acquire (get_live_data) while writers use a
// plain store -- presumably safe because updates happen in the VM thread at
// a safepoint; confirm there is no concurrent writer.
  70 void ShenandoahHeapRegion::set_live_data(size_t s) {
  71   assert(Thread::current()->is_VM_thread(), "by VM thread");
  72   _live_data = s;
  73 }
  74 
// Reads the live-data counter with acquire semantics (may run concurrently
// with the VM-thread writer above).
  75 size_t ShenandoahHeapRegion::get_live_data() const {
  76   assert (sizeof(julong) == sizeof(size_t), "do not read excessively");
  77   return (size_t)OrderAccess::load_acquire((volatile julong*)&_live_data);
  78 }
  79 
// Garbage = used() - live data. Humongous regions are exempted from the
// used() >= live invariant in the assert below.
  80 size_t ShenandoahHeapRegion::garbage() const {
  81   assert(used() >= get_live_data() || is_humongous(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
  82          get_live_data(), used());
  83   size_t result = used() - get_live_data();
  84   return result;
  85 }
  86 
  87 bool ShenandoahHeapRegion::is_in_collection_set() const {
  88   return _is_in_collection_set;


















  89 }
  90 
  91 #include <sys/mman.h>
  92 
  93 #ifdef ASSERT
  94 
// Debug-only (ASSERT builds): decrements the protection nesting level and,
// when it reaches zero, protects the region's memory -- read-only when
// verifying from-space writes, fully inaccessible when verifying reads.
// NOTE(review): end() - bottom() is a HeapWord count, but os::protect_memory
// takes a byte size -- verify the units; as written only 1/HeapWordSize of
// the region would be protected.
  95 void ShenandoahHeapRegion::memProtectionOn() {
  96   /*
  97   log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
  98   print(tty);
  99   */
 100   MutexLockerEx ml(&_mem_protect_lock, true);
 101   assert(_mem_protection_level >= 1, "invariant");
 102 
 103   if (--_mem_protection_level == 0) {
 104     if (ShenandoahVerifyWritesToFromSpace) {
 105       assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
 106       os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
 107     } else {
 108       assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
 109       os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
 110     }
 111   }
 112 }
 113 
// Debug-only counterpart: increments the nesting level and, on the 0 -> 1
// transition, restores read/write access to the region's memory.
// Same HeapWord-vs-byte unit concern as memProtectionOn() above.
 114 void ShenandoahHeapRegion::memProtectionOff() {
 115   /*
 116   tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
 117   print(tty);
 118   */
 119   MutexLockerEx ml(&_mem_protect_lock, true);
 120   assert(_mem_protection_level >= 0, "invariant");
 121   if (_mem_protection_level++ == 0) {
 122     os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
 123   }
 124 }
 125 
 126 #endif
 127 
// Enters/removes this region into/from the collection set. Humongous regions
// are never allowed in. On entry, also registers the region with the heap's
// in-cset fast-test table. In verification builds, toggles the from-space
// memory protection accordingly.
 128 void ShenandoahHeapRegion::set_is_in_collection_set(bool b) {
 129   assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");
 130 
 131   _is_in_collection_set = b;
 132 
 133   if (b) {
 134     // tty->print_cr("registering region in fast-cset");
 135     // print();
 136     ShenandoahHeap::heap()->register_region_with_in_cset_fast_test(this);
 137   }
 138 
 139 #ifdef ASSERT
 140   if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
 141     if (b) {
 142       memProtectionOn();
 143       assert(_mem_protection_level == 0, "need to be protected here");
 144     } else {
 145       assert(_mem_protection_level == 0, "need to be protected here");
 146       memProtectionOff();
 147     }
 148   }
 149 #endif
 150 }
 151 
// Byte offset of the _is_in_collection_set flag, for use by generated code
// (fast path checks compiled into barriers).
 152 ByteSize ShenandoahHeapRegion::is_in_collection_set_offset() {
 153   return byte_offset_of(ShenandoahHeapRegion, _is_in_collection_set);
 154 }
 155 
// Prints a one-line human-readable summary of this region: address/index,
// state letters (C = in collection set, H = humongous start, h = humongous
// continuation), and the live/garbage/bounds figures.
 156 void ShenandoahHeapRegion::print_on(outputStream* st) const {
 157   st->print("ShenandoahHeapRegion: "PTR_FORMAT"/"SIZE_FORMAT, p2i(this), _region_number);
 158 
 159   if (is_in_collection_set())
 160     st->print("C");
 161   if (is_humongous_start()) {
 162     st->print("H");
 163   }
 164   if (is_humongous_continuation()) {
 165     st->print("h");
 166   }
 167   // Separator is printed unconditionally; the indentation below is cosmetic.
 168     st->print(" ");
 169 
 170   st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
 171                get_live_data(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
 172 }
 173 
 174 
// ObjectClosure adapter: applies an oop closure to every object's fields,
// optionally skipping objects that are not marked live in the current
// marking bitmap.
 175 class SkipUnreachableObjectToOopClosure: public ObjectClosure {
 176   ExtendedOopClosure* _cl;                // closure applied to each field
 177   bool _skip_unreachable_objects;         // if true, skip unmarked objects
 178   ShenandoahHeap* _heap;                  // cached heap for mark queries
 179 
 180 public:
 181   SkipUnreachableObjectToOopClosure(ExtendedOopClosure* cl, bool skip_unreachable_objects) :
 182     _cl(cl), _skip_unreachable_objects(skip_unreachable_objects), _heap(ShenandoahHeap::heap()) {}
 183 
 184   void do_object(oop obj) {
 185 
// Visit the object's fields unless we are skipping unreachable objects and
// this one is not marked in the current marking.
 186     if ((! _skip_unreachable_objects) || _heap->is_marked_current(obj)) {
 187 #ifdef ASSERT
 188       if (_skip_unreachable_objects) {
 189         assert(_heap->is_marked_current(obj), "obj must be live");
 190       }
 191 #endif
 192       obj->oop_iterate(_cl);
 193     }
 194 
 195   }
 196 };
 197 
 198 void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
 199   HeapWord* p = bottom() + BrooksPointer::word_size();
 200   ShenandoahHeap* heap = ShenandoahHeap::heap();
 201   while (p < top() && !(allow_cancel && heap->cancelled_concgc())) {
 202     blk->do_object(oop(p));
 203 #ifdef ASSERT
 204     if (ShenandoahVerifyReadsToFromSpace) {
 205       memProtectionOff();
 206       p += oop(p)->size() + BrooksPointer::word_size();
 207       memProtectionOn();
 208     } else {
 209       p += oop(p)->size() + BrooksPointer::word_size();
 210     }
 211 #else
 212       p += oop(p)->size() + BrooksPointer::word_size();
 213 #endif
 214   }
 215 }
 216 
// Like object iteration, but uses do_object_careful and stops at the
// concurrent-iteration safe limit. Returns the address where the closure
// failed (returned size 0), or NULL when the whole range was walked.
 217 HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
 218   HeapWord * limit = concurrent_iteration_safe_limit();
 219   assert(limit <= top(), "sanity check");
 220   for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
 221     size_t size = blk->do_object_careful(oop(p));
 222     if (size == 0) {
 223       return p;  // failed at p
 224     } else {
// Skip the object plus the Brooks pointer word of the next object.
 225       p += size + BrooksPointer::word_size();
 226     }
 227   }
 228   return NULL; // all done
 229 }
 230 
// Applies cl to the fields of every object in the region, optionally
// restricted to objects marked live (see SkipUnreachableObjectToOopClosure).
 231 void ShenandoahHeapRegion::oop_iterate_skip_unreachable(ExtendedOopClosure* cl, bool skip_unreachable_objects) {
 232   SkipUnreachableObjectToOopClosure cl2(cl, skip_unreachable_objects);
 233   object_iterate_interruptible(&cl2, false);
 234 }
 235 
 236 void ShenandoahHeapRegion::fill_region() {
 237   ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
 238 
 239   if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
 240     HeapWord* filler = allocate(BrooksPointer::word_size());
 241     HeapWord* obj = allocate(end() - top());
 242     sh->fill_with_object(obj, end() - obj);
 243     BrooksPointer::initialize(oop(obj));
 244   }
 245 }
 246 
// Marks/clears this region as the first region of a humongous allocation.
 247 void ShenandoahHeapRegion::set_humongous_start(bool start) {
 248   _humongous_start = start;
 249 }
 250 
// Marks/clears this region as a continuation of a humongous allocation.
 251 void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
 252   _humongous_continuation = continuation;
 253 }
 254 
// True when this region is any part of a humongous allocation.
 255 bool ShenandoahHeapRegion::is_humongous() const {
 256   return _humongous_start || _humongous_continuation;
 257 }
 258 
 259 bool ShenandoahHeapRegion::is_humongous_start() const {
 260   return _humongous_start;
 261 }
 262 
 263 bool ShenandoahHeapRegion::is_humongous_continuation() const {
 264   return _humongous_continuation;
 265 }
 266 
// Common reset logic: reinitializes the underlying space over the original
// reserved range and clears live-data and humongous flags.
// Note: _top_at_mark_start reset is intentionally commented out here;
// _top_at_prev_mark_start is rewound to bottom().
 267 void ShenandoahHeapRegion::do_reset() {
 268   ContiguousSpace::initialize(reserved, true, false);
 269   clear_live_data();
 270   _humongous_start = false;
 271   _humongous_continuation = false;
 272   // _top_at_mark_start = bottom();
 273   _top_at_prev_mark_start = bottom();
 274 }
 275 
// Recycles an evacuated region: reset, then formally leave the collection
// set (which also handles debug memory protection transitions).
 276 void ShenandoahHeapRegion::recycle() {
 277   do_reset();
 278   set_is_in_collection_set(false);
 279 }
 280 
// Full reset outside of recycling; clears the cset flag directly without
// the set_is_in_collection_set() side effects. The assert references the
// ASSERT-only _mem_protection_level field; assert() compiles away in
// product builds, so this is debug-only.
 281 void ShenandoahHeapRegion::reset() {
 282   assert(_mem_protection_level == 1, "needs to be unprotected here");
 283   do_reset();
 284   _is_in_collection_set = false;
 285 }
 286 
// Returns the start of the object containing address p, or top() when p is
// in the unallocated tail. Performs a linear parse of the region from the
// first object, stepping over each object's Brooks pointer word.
 287 HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
 288   assert(MemRegion(bottom(), end()).contains(p),
 289          "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
 290          p2i(p), p2i(bottom()), p2i(end()));
 291   if (p >= top()) {
 292     return top();
 293   } else {
 294     HeapWord* last = bottom() + BrooksPointer::word_size();
 295     HeapWord* cur = last;
// Advance until cur passes p; 'last' then starts the covering object.
 296     while (cur <= p) {
 297       last = cur;
 298       cur += oop(cur)->size() + BrooksPointer::word_size();
 299     }
 300     assert(oop(last)->is_oop(),
 301            PTR_FORMAT" should be an object start", p2i(last));
 302     return last;
 303   }
 304 }


 347   // Recalculate the region size to make sure it's a power of
 348   // 2. This means that region_size is the largest power of 2 that's
 349   // <= what we've calculated so far.
 350   region_size = ((uintx)1 << region_size_log);
 351 
 352   // Now, set up the globals.
 353   guarantee(RegionSizeShift == 0, "we should only set it once");
 354   RegionSizeShift = region_size_log;
 355 
 356   guarantee(RegionSizeBytes == 0, "we should only set it once");
 357   RegionSizeBytes = (size_t)region_size;
 358 
 359   log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
 360   log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
 361   log_info(gc, init)("Region size shift: "SIZE_FORMAT, RegionSizeShift);
 362   log_info(gc, init)("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
 363   log_info(gc, init)("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
 364 }
 365 
// Mark-compact support: the next space to compact into is chosen by the heap.
 366 CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
 367   return ShenandoahHeap::heap()->next_compaction_region(this);
 368 }
 369 
// Mark-compact phase 2: compute forwarding addresses for live objects.
 370 void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
 371   scan_and_forward(this, cp);
 372 }
 373 
// Mark-compact phase 3: update references to point at forwarded locations.
 374 void ShenandoahHeapRegion::adjust_pointers() {
 375   // Check first is there is any work to do.
 376   if (used() == 0) {
 377     return;   // Nothing to do.
 378   }
 379 
 380   scan_and_adjust_pointers(this);
 381 }
 382 
// Mark-compact phase 4: move live objects to their forwarded locations.
// Humongous regions are handled elsewhere and never compacted here.
 383 void ShenandoahHeapRegion::compact() {
 384   assert(!is_humongous(), "Shouldn't be compacting humongous regions");
 385   scan_and_compact(this);
 386 }
 387 
// Snapshots the current top() as top-at-mark-start (TAMS) and publishes it
// to the heap-side TAMS table.
 388 void ShenandoahHeapRegion::init_top_at_mark_start() {
 389   _top_at_mark_start = top();
 390   ShenandoahHeap::heap()->set_top_at_mark_start(bottom(), top());
 391 }
 392 
// Sets TAMS explicitly (both the region-local copy and the heap-side table).
 393 void ShenandoahHeapRegion::set_top_at_mark_start(HeapWord* top) {
 394   _top_at_mark_start = top;
 395   ShenandoahHeap::heap()->set_top_at_mark_start(bottom(), top);
 396 }
 397 
// Forgets the previous-cycle TAMS (rewinds it to bottom()).
 398 void ShenandoahHeapRegion::reset_top_at_prev_mark_start() {
 399   _top_at_prev_mark_start = bottom();
 400 }
 401 
 402 HeapWord* ShenandoahHeapRegion::top_at_mark_start() {
 403   return _top_at_mark_start;
 404 }
 405 
 406 HeapWord* ShenandoahHeapRegion::top_at_prev_mark_start() {
 407   return _top_at_prev_mark_start;
 408 }
 409 
 410 HeapWord* ShenandoahHeapRegion::top_prev_mark_bitmap() {
 411   return _top_prev_mark_bitmap;
 412 }
 413 
// Objects at or above the previous-cycle TAMS were allocated after that
// mark started (and are therefore implicitly live for that marking).
 414 bool ShenandoahHeapRegion::allocated_after_prev_mark_start(HeapWord* addr) const {
 415   return addr >= _top_at_prev_mark_start;
 416 }
 417 
// Swaps current and previous TAMS at the start of a new cycle, and
// publishes the new current TAMS (the old previous value) to the heap.
 418 void ShenandoahHeapRegion::swap_top_at_mark_start() {
 419   HeapWord* tmp = _top_at_prev_mark_start;
 420   _top_at_prev_mark_start = _top_at_mark_start;
 421   _top_at_mark_start = tmp;
 422   ShenandoahHeap::heap()->set_top_at_mark_start(bottom(), tmp);
 423 }
 424 
// Records the top value covered by the previous marking bitmap.
 425 void ShenandoahHeapRegion::set_top_prev_mark_bitmap(HeapWord* top) {
 426   _top_prev_mark_bitmap = top;
 427 }
 428 
// Pins this region (e.g. for JNI critical sections): pinned regions must not
// be moved/evacuated. Counter is atomic; called only outside safepoints.
 429 void ShenandoahHeapRegion::pin() {
 430   assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
 431   assert(_critical_pins >= 0, "sanity");
 432   Atomic::inc(&_critical_pins);
 433 }
 434 
 435 void ShenandoahHeapRegion::unpin() {
 436   assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
 437   Atomic::dec(&_critical_pins);
 438   assert(_critical_pins >= 0, "sanity");
 439 }
 440 
// Queried only at safepoints, when no pin/unpin can be racing, so a plain
// read of the counter is sufficient here.
 441 bool ShenandoahHeapRegion::is_pinned() {
 442   assert(_critical_pins >= 0, "sanity");
 443   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoints");
 444   return _critical_pins > 0;
 445 }


  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "memory/allocation.hpp"
  25 #include "gc/shenandoah/brooksPointer.hpp"
  26 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  27 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  28 #include "gc/shared/space.inline.hpp"
  29 #include "memory/universe.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/mutexLocker.hpp"
  32 #include "runtime/os.hpp"
  33 #include "runtime/safepoint.hpp"
  34 
  35 Monitor ShenandoahHeapRegion::_mem_protect_lock(Mutex::special, "ShenandoahMemProtect_lock", true, Monitor::_safepoint_check_never);
  36 size_t ShenandoahHeapRegion::RegionSizeShift = 0;
  37 size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
  38 
// Refactored revision: late initialization of a region; additionally caches
// the owning ShenandoahHeap so accessors need not call ShenandoahHeap::heap().
// Always returns JNI_OK.
  39 jint ShenandoahHeapRegion::initialize_heap_region(ShenandoahHeap* heap, HeapWord* start,
  40                                                   size_t regionSizeWords, size_t index) {
  41   _heap = heap;
  42   reserved = MemRegion(start, regionSizeWords);
  43   ContiguousSpace::initialize(reserved, true, false);
  44   _live_data = 0;

  45   _region_number = index;
  46 #ifdef ASSERT
  47   _mem_protection_level = 1; // Off, level 1.
  48 #endif



  49   return JNI_OK;
  50 }
  51 
// Index of this region within the heap's region array.
  52 size_t ShenandoahHeapRegion::region_number() const {
  53   return _region_number;
  54 }
  55 
// Undoes the most recent allocation of 'size' words by moving top() back.
// NOTE(review): no check that 'size' matches the last allocation; always true.
  56 bool ShenandoahHeapRegion::rollback_allocation(uint size) {
  57   set_top(top() - size);
  58   return true;
  59 }
  60 
// Resets the live-data counter. VM-thread only (asserted below).
  61 void ShenandoahHeapRegion::clear_live_data() {
  62   assert(Thread::current()->is_VM_thread(), "by VM thread");
  63   _live_data = 0;
  64 }
  65 
// Sets the live-data counter. VM-thread only. NOTE(review): plain store,
// while the reader below uses load_acquire -- presumably safepoint-safe;
// confirm.
  66 void ShenandoahHeapRegion::set_live_data(size_t s) {
  67   assert(Thread::current()->is_VM_thread(), "by VM thread");
  68   _live_data = s;
  69 }
  70 
// Concurrent read of the live-data counter with acquire semantics.
  71 size_t ShenandoahHeapRegion::get_live_data() const {
  72   assert (sizeof(julong) == sizeof(size_t), "do not read excessively");
  73   return (size_t)OrderAccess::load_acquire((volatile julong*)&_live_data);
  74 }
  75 
// Garbage = used() - live data; humongous regions exempt from the invariant.
  76 size_t ShenandoahHeapRegion::garbage() const {
  77   assert(used() >= get_live_data() || is_humongous(), "Live Data must be a subset of used() live: "SIZE_FORMAT" used: "SIZE_FORMAT,
  78          get_live_data(), used());
  79   size_t result = used() - get_live_data();
  80   return result;
  81 }
  82 
// Refactored revision: the in-collection-set state is consolidated into a
// heap-side table keyed by region number (no per-region flag anymore).
  83 bool ShenandoahHeapRegion::in_collection_set() const {
  84   return _heap->region_in_collection_set(_region_number);
  85 }
  86 
// Enters/removes this region into/from the collection set via the heap-side
// table. Humongous regions are never allowed in. In verification builds,
// toggles the from-space memory protection accordingly.
  87 void ShenandoahHeapRegion::set_in_collection_set(bool b) {
  88   assert(! (is_humongous() && b), "never ever enter a humongous region into the collection set");
  89 
  90   _heap->set_region_in_collection_set(_region_number, b);
  91 
  92 #ifdef ASSERT
  93   if (ShenandoahVerifyWritesToFromSpace || ShenandoahVerifyReadsToFromSpace) {
  94     if (b) {
  95       memProtectionOn();
  96       assert(_mem_protection_level == 0, "need to be protected here");
  97     } else {
  98       assert(_mem_protection_level == 0, "need to be protected here");
  99       memProtectionOff();
 100     }
 101   }
 102 #endif
 103 }
 104 
 105 #include <sys/mman.h>
 106 
 107 #ifdef ASSERT
 108 
// Debug-only: decrements the protection nesting level and, at zero, protects
// the region -- read-only when verifying from-space writes, inaccessible
// when verifying from-space reads.
// NOTE(review): end() - bottom() is a HeapWord count but os::protect_memory
// takes a byte size -- verify the units.
 109 void ShenandoahHeapRegion::memProtectionOn() {
 110   /*
 111   log_develop_trace(gc)("Protect memory on region level: "INT32_FORMAT, _mem_protection_level);
 112   print(tty);
 113   */
 114   MutexLockerEx ml(&_mem_protect_lock, true);
 115   assert(_mem_protection_level >= 1, "invariant");
 116 
 117   if (--_mem_protection_level == 0) {
 118     if (ShenandoahVerifyWritesToFromSpace) {
 119       assert(! ShenandoahVerifyReadsToFromSpace, "can't verify from-space reads when verifying from-space writes");
 120       os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_READ);
 121     } else {
 122       assert(ShenandoahVerifyReadsToFromSpace, "need to be verifying reads here");
 123       os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_NONE);
 124     }
 125   }
 126 }
 127 
// Debug-only counterpart: on the 0 -> 1 transition, restores read/write
// access. Same HeapWord-vs-byte unit concern as above.
 128 void ShenandoahHeapRegion::memProtectionOff() {
 129   /*
 130   tty->print_cr("unprotect memory on region level: "INT32_FORMAT, _mem_protection_level);
 131   print(tty);
 132   */
 133   MutexLockerEx ml(&_mem_protect_lock, true);
 134   assert(_mem_protection_level >= 0, "invariant");
 135   if (_mem_protection_level++ == 0) {
 136     os::protect_memory((char*) bottom(), end() - bottom(), os::MEM_PROT_RW);
 137   }
 138 }
 139 
 140 #endif
 141 




























// One-line summary: address/index, state letters (C = in collection set,
// H = humongous start, h = humongous continuation), live/garbage/bounds.
 142 void ShenandoahHeapRegion::print_on(outputStream* st) const {
 143   st->print("ShenandoahHeapRegion: "PTR_FORMAT"/"SIZE_FORMAT, p2i(this), _region_number);
 144 
 145   if (in_collection_set())
 146     st->print("C");
 147   if (is_humongous_start()) {
 148     st->print("H");
 149   }
 150   if (is_humongous_continuation()) {
 151     st->print("h");
 152   }
 153   // Separator is printed unconditionally; the indentation below is cosmetic.
 154     st->print(" ");
 155 
 156   st->print_cr("live = "SIZE_FORMAT" garbage = "SIZE_FORMAT" bottom = "PTR_FORMAT" end = "PTR_FORMAT" top = "PTR_FORMAT,
 157                get_live_data(), garbage(), p2i(bottom()), p2i(end()), p2i(top()));
 158 }
 159 
 160 























 161 void ShenandoahHeapRegion::object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel) {
 162   HeapWord* p = bottom() + BrooksPointer::word_size();
 163   while (p < top() && !(allow_cancel && _heap->cancelled_concgc())) {

 164     blk->do_object(oop(p));
 165 #ifdef ASSERT
 166     if (ShenandoahVerifyReadsToFromSpace) {
 167       memProtectionOff();
 168       p += oop(p)->size() + BrooksPointer::word_size();
 169       memProtectionOn();
 170     } else {
 171       p += oop(p)->size() + BrooksPointer::word_size();
 172     }
 173 #else
 174       p += oop(p)->size() + BrooksPointer::word_size();
 175 #endif
 176   }
 177 }
 178 
// Careful iteration up to the concurrent-iteration safe limit. Returns the
// failing address (closure returned size 0), or NULL when complete.
 179 HeapWord* ShenandoahHeapRegion::object_iterate_careful(ObjectClosureCareful* blk) {
 180   HeapWord * limit = concurrent_iteration_safe_limit();
 181   assert(limit <= top(), "sanity check");
 182   for (HeapWord* p = bottom() + BrooksPointer::word_size(); p < limit;) {
 183     size_t size = blk->do_object_careful(oop(p));
 184     if (size == 0) {
 185       return p;  // failed at p
 186     } else {
// Skip the object plus the Brooks pointer word of the next object.
 187       p += size + BrooksPointer::word_size();
 188     }
 189   }
 190   return NULL; // all done
 191 }
 192 





 193 void ShenandoahHeapRegion::fill_region() {
 194   ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
 195 
 196   if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
 197     HeapWord* filler = allocate(BrooksPointer::word_size());
 198     HeapWord* obj = allocate(end() - top());
 199     sh->fill_with_object(obj, end() - obj);
 200     BrooksPointer::initialize(oop(obj));
 201   }
 202 }
 203 
// Marks/clears this region as the first region of a humongous allocation.
 204 void ShenandoahHeapRegion::set_humongous_start(bool start) {
 205   _humongous_start = start;
 206 }
 207 
// Marks/clears this region as a continuation of a humongous allocation.
 208 void ShenandoahHeapRegion::set_humongous_continuation(bool continuation) {
 209   _humongous_continuation = continuation;
 210 }
 211 
// True when this region is any part of a humongous allocation.
 212 bool ShenandoahHeapRegion::is_humongous() const {
 213   return _humongous_start || _humongous_continuation;
 214 }
 215 
 216 bool ShenandoahHeapRegion::is_humongous_start() const {
 217   return _humongous_start;
 218 }
 219 
 220 bool ShenandoahHeapRegion::is_humongous_continuation() const {
 221   return _humongous_continuation;
 222 }
 223 
 224 void ShenandoahHeapRegion::recycle() {
 225   ContiguousSpace::initialize(reserved, true, false);
 226   clear_live_data();
 227   _humongous_start = false;
 228   _humongous_continuation = false;
 229   set_in_collection_set(false);












 230 }
 231 
// Returns the start of the object containing p, or top() when p is in the
// unallocated tail; linear parse stepping over Brooks pointer words.
 232 HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {
 233   assert(MemRegion(bottom(), end()).contains(p),
 234          "p ("PTR_FORMAT") not in space ["PTR_FORMAT", "PTR_FORMAT")",
 235          p2i(p), p2i(bottom()), p2i(end()));
 236   if (p >= top()) {
 237     return top();
 238   } else {
 239     HeapWord* last = bottom() + BrooksPointer::word_size();
 240     HeapWord* cur = last;
// Advance until cur passes p; 'last' then starts the covering object.
 241     while (cur <= p) {
 242       last = cur;
 243       cur += oop(cur)->size() + BrooksPointer::word_size();
 244     }
 245     assert(oop(last)->is_oop(),
 246            PTR_FORMAT" should be an object start", p2i(last));
 247     return last;
 248   }
 249 }


 292   // Recalculate the region size to make sure it's a power of
 293   // 2. This means that region_size is the largest power of 2 that's
 294   // <= what we've calculated so far.
 295   region_size = ((uintx)1 << region_size_log);
 296 
 297   // Now, set up the globals.
 298   guarantee(RegionSizeShift == 0, "we should only set it once");
 299   RegionSizeShift = region_size_log;
 300 
 301   guarantee(RegionSizeBytes == 0, "we should only set it once");
 302   RegionSizeBytes = (size_t)region_size;
 303 
 304   log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
 305   log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
 306   log_info(gc, init)("Region size shift: "SIZE_FORMAT, RegionSizeShift);
 307   log_info(gc, init)("Initial number of regions: "SIZE_FORMAT, initial_heap_size / RegionSizeBytes);
 308   log_info(gc, init)("Maximum number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
 309 }
 310 
// Mark-compact support: next space to compact into, chosen via the cached heap.
 311 CompactibleSpace* ShenandoahHeapRegion::next_compaction_space() const {
 312   return _heap->next_compaction_region(this);
 313 }
 314 
// Mark-compact phase 2: compute forwarding addresses for live objects.
 315 void ShenandoahHeapRegion::prepare_for_compaction(CompactPoint* cp) {
 316   scan_and_forward(this, cp);
 317 }
 318 
// Mark-compact phase 3: update references to point at forwarded locations.
 319 void ShenandoahHeapRegion::adjust_pointers() {
 320   // Check first is there is any work to do.
 321   if (used() == 0) {
 322     return;   // Nothing to do.
 323   }
 324 
 325   scan_and_adjust_pointers(this);
 326 }
 327 
 328 void ShenandoahHeapRegion::compact() {
 329   assert(!is_humongous(), "Shouldn't be compacting humongous regions");
 330   scan_and_compact(this);









































 331 }
 332 
// Pins this region (e.g. for JNI critical sections): pinned regions must not
// be moved/evacuated. Counter is atomic; called only outside safepoints.
 333 void ShenandoahHeapRegion::pin() {
 334   assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
 335   assert(_critical_pins >= 0, "sanity");
 336   Atomic::inc(&_critical_pins);
 337 }
 338 
 339 void ShenandoahHeapRegion::unpin() {
 340   assert(! SafepointSynchronize::is_at_safepoint(), "only outside safepoints");
 341   Atomic::dec(&_critical_pins);
 342   assert(_critical_pins >= 0, "sanity");
 343 }
 344 
// Queried only at safepoints, when no pin/unpin can be racing, so a plain
// read of the counter is sufficient here.
 345 bool ShenandoahHeapRegion::is_pinned() {
 346   assert(_critical_pins >= 0, "sanity");
 347   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoints");
 348   return _critical_pins > 0;
 349 }
< prev index next >