/*
 * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahBrooksPointer.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

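// Update a single reference slot: if it points to a forwarded object,
// store the forwardee back into the slot.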
template <class T>
void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    _heap->update_with_forwarded_not_null(p, obj);
  }
}

void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }

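// Claim the next region by atomically bumping the shared index.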
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add((size_t) 1, &_index);
  // get_region() provides the bounds-check and returns NULL on OOB.
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkGang* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkGang* ShenandoahHeap::get_safepoint_workers() {
  return _safepoint_workers;
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

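// If obj is in the collection set, it has been forwarded (except during full/
// degenerated GC or after cancellation); replace the reference at p with the
// forwardee. References outside the collection set are left untouched.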
template <class T>
inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress());
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
  }
#ifdef ASSERT
  else {
    shenandoah_assert_not_forwarded(p, obj);
  }
#endif
  return obj;
}

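// NULL-tolerant wrapper: decodes the reference at p and delegates to
// maybe_update_with_forwarded_not_null().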
template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    return maybe_update_with_forwarded_not_null(p, obj);
  } else {
    return NULL;
  }
}

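// Evacuation-phase variant: if the referent is in the collection set and not
// yet evacuated, evacuate it here, then CAS the forwardee into the slot.
// Returns NULL if the slot was concurrently updated by another thread.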
template <class T>
inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop heap_oop = CompressedOops::decode_not_null(o);
    if (in_collection_set(heap_oop)) {
      oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
      if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
        forwarded_oop = evacuate_object(heap_oop, Thread::current());
      }
      oop prev = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
      if (oopDesc::equals_raw(prev, heap_oop)) {
        return forwarded_oop;
      } else {
        return NULL;
      }
    }
    return heap_oop;
  } else {
    return NULL;
  }
}

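// CAS helpers for (compressed) heap references. Return the witness, i.e. the
// value found in the slot before the exchange.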
inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg(n, addr, c);
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = CompressedOops::encode(c);
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}

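// Update the reference at p if the referent is in the collection set and has
// already been forwarded. Unlike evac_update_with_forwarded(), this never
// evacuates; a not-yet-forwarded referent is returned as-is.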
template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
  shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress());
  shenandoah_assert_correct(p, heap_oop);

  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
    if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
      // Not yet forwarded; this can happen e.g. during concurrent evacuation.
      return forwarded_oop;
    }

    shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
    shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());

    // If this fails, another thread wrote to p before us. It will be logged
    // in SATB, and the reference will be updated later.
    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);

    if (oopDesc::equals_raw(result, heap_oop)) { // CAS successful.
      return forwarded_oop;
    } else {
      // Note: we used to assert the following here. That does not hold, because during
      // marking/update-refs a Java thread can beat us with an arraycopy that first copies
      // the array (potentially containing from-space refs) and only afterwards updates all
      // refs to to-space. This leaves a short window during which new array elements can
      // still be from-space.
      // assert(CompressedOops::is_null(result) ||
      //        oopDesc::equals_raw(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
      //       "expect not forwarded");
      return NULL;
    }
  } else {
    shenandoah_assert_not_forwarded(p, heap_oop);
    return heap_oop;
  }
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}

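// GC cancellation protocol: _cancelled_gc is CANCELLABLE while the GC runs
// normally, NOT_CANCELLED while a worker sits in a suspendible yield point,
// and CANCELLED once the cycle is cancelled. Holding NOT_CANCELLED keeps
// try_cancel_gc() from cancelling in the middle of a yield.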
inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (! (sts_active && ShenandoahSuspendibleWorkers)) {
    return cancelled_gc();
  }

  jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }

    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
    // to restore to CANCELLABLE.
    if (prev == CANCELLABLE) {
      _cancelled_gc.set(CANCELLABLE);
    }
    return false;
  } else {
    return true;
  }
}

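// Attempt CANCELLABLE -> CANCELLED. Returns true if this thread did the
// cancellation, false if the GC was already cancelled. Spins (with a
// safepoint check) while a worker holds the state at NOT_CANCELLED.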
inline bool ShenandoahHeap::try_cancel_gc() {
  while (true) {
    jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
    if (prev == CANCELLABLE) return true;
    else if (prev == CANCELLED) return false;
    assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
    assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
    {
      // We need to provide a safepoint here; otherwise we might
      // spin forever if a safepoint is pending.
      ThreadBlockInVM sp(JavaThread::current());
      SpinPause();
    }
  }
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

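// Fast-path GCLAB allocation. Returns NULL when the thread has no GCLAB, so
// the caller falls back to shared allocation; otherwise tries the LAB fast
// path and then the slow path, which may retire and refill the GCLAB.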
inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == NULL) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread; fall back to shared allocation.
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}

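// Evacuate p: allocate space (GCLAB or shared), copy the object together with
// its Brooks pointer, then race to install the forwarding pointer. Exactly
// one thread wins the race; losers roll back or fill their stale copy and
// return the winner's copy.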
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM-during-evac protocol, so it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size_no_fwdptr = (size_t) p->size();
  size_t size_with_fwdptr = size_no_fwdptr + ShenandoahBrooksPointer::word_size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* filler = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    filler = NULL;
  } else {
#endif
    if (UseTLAB) {
      filler = allocate_from_gclab(thread, size_with_fwdptr);
    }
    if (filler == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size_with_fwdptr);
      filler = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (filler == NULL) {
    control_thread()->handle_alloc_failure_evac(size_with_fwdptr);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object and initialize its forwarding ptr:
  HeapWord* copy = filler + ShenandoahBrooksPointer::word_size();
  oop copy_val = oop(copy);

  Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
  ShenandoahBrooksPointer::initialize(oop(copy));

  // Try to install the new forwarding pointer.
  oop result = ShenandoahBrooksPointer::try_update_forwardee(p, copy_val);

  if (oopDesc::equals_raw(result, p)) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(filler, size_with_fwdptr);
    } else {
      fill_with_object(copy, size_no_fwdptr);
    }
    shenandoah_assert_correct(NULL, copy_val);
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}

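// SATB filter: only entries that are still unmarked need processing.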
inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  return !_marking_context->is_marked(oop(entry));
}

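// Collection set membership is tracked per region; this tests the entry for
// the region containing p.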
template <class T>
inline bool ShenandoahHeap::in_collection_set(T p) const {
  HeapWord* obj = (HeapWord*) p;
  assert(collection_set() != NULL, "Sanity");
  assert(is_in(obj), "should be in heap");

  return collection_set()->is_in(obj);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_concurrent_traversal_in_progress() const {
  return _gc_state.is_set(TRAVERSAL);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

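// Iterate over all marked objects in a region, up to the given limit: below
// TAMS the mark bitmap is authoritative (Step 1), past TAMS every object is
// implicitly live and the walk advances by object size (Step 2).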
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(ShenandoahBrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  MarkBitMap* mark_bit_map = ctx->mark_bit_map();
  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = ShenandoahBrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = ShenandoahBrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + ShenandoahBrooksPointer::word_size();
  HeapWord* end = MIN2(tams + ShenandoahBrooksPointer::word_size(), region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = mark_bit_map->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to complete. This is why we try to scan the
    // bitmap linearly, disregarding the object size. However, since we know
    // the forwarding pointer precedes the object, we can skip over it. Once
    // we cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.

    // There are no variable-length arrays in standard C++; keep enough slots
    // to fit the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, ShenandoahBrooksPointer::byte_offset());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert (cb < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams + ShenandoahBrooksPointer::word_size();
  while (cs < limit) {
    assert (cs > tams,  "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size + skip_objsize_delta;
  }
}

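// Adapters from ObjectClosure to an oop closure: iterate the reference fields
// of each visited object, optionally bounded to a given address range.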
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

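// For humongous regions, walk the humongous start region, but bound field
// iteration to [bottom, top) of the requested region.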
template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

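// Bounds-checked region accessor: returns NULL for out-of-range indices.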
inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return NULL;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP