/*
 * Copyright (c) 2015, 2018, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shenandoah/brooksPointer.inline.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"

template <class T>
void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    _heap->update_with_forwarded_not_null(p, obj);
  }
}

void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add((volatile size_t) 1, &_index);
  // get_region() provides the bounds-check and returns NULL on OOB.
  return _heap->get_region(new_index - 1);
}

/*
 * Marks the object. Returns true if the object has not been marked before and has
 * been marked by this thread. Returns false if the object has already been marked,
 * or if a competing thread succeeded in marking this object.
 */
inline bool ShenandoahHeap::mark_next(oop obj) const {
  shenandoah_assert_not_forwarded(NULL, obj);
  HeapWord* addr = (HeapWord*) obj;
  return (! allocated_after_next_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
}
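
// Usage sketch (illustrative only): a concurrent marking worker would typically claim a
// newly discovered object via mark_next() and only push it onto its work queue when it
// wins the race. `q` is a hypothetical work queue:
//
//   oop obj = CompressedOops::decode_not_null(o);
//   if (heap->mark_next(obj)) {
//     // We marked it first, so this thread is responsible for scanning its fields.
//     q->push(obj);
//   }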

inline bool ShenandoahHeap::is_marked_next(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return allocated_after_next_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
}

inline bool ShenandoahHeap::is_marked_complete(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return allocated_after_complete_mark_start(addr) || _complete_mark_bit_map->isMarked(addr);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

template <class T>
inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_concgc());
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
    RawAccess<OOP_NOT_NULL>::oop_store(p, obj);
  }
#ifdef ASSERT
  else {
    shenandoah_assert_not_forwarded(p, obj);
  }
#endif
  return obj;
}

template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    return maybe_update_with_forwarded_not_null(p, obj);
  } else {
    return NULL;
  }
}

template <class T>
inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop heap_oop = CompressedOops::decode_not_null(o);
    if (in_collection_set(heap_oop)) {
      oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
      if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) {
        forwarded_oop = evacuate_object(heap_oop, Thread::current());
      }
      oop prev = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
      if (oopDesc::unsafe_equals(prev, heap_oop)) {
        return forwarded_oop;
      } else {
        return NULL;
      }
    }
    return heap_oop;
  } else {
    return NULL;
  }
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg(n, addr, c);
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = CompressedOops::encode(c);
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}
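
// Illustrative note: both CAS helpers above return whatever value was previously in the
// slot, so callers detect success by comparing that witness against the expected value.
// A minimal sketch of the pattern, assuming a slot `p` currently holding `heap_oop`:
//
//   oop witness = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
//   if (oopDesc::unsafe_equals(witness, heap_oop)) {
//     // Our CAS installed forwarded_oop into *p.
//   } else {
//     // Another thread updated *p first; the caller decides whether to retry or give up.
//   }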

template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
  shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress());
  shenandoah_assert_correct(p, heap_oop);

  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
    if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) {
      // E.g. during evacuation.
      return forwarded_oop;
    }

    shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress());
    shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_concgc());

    log_develop_trace(gc)("Updating old ref: " PTR_FORMAT " pointing to " PTR_FORMAT " to new ref: " PTR_FORMAT,
                          p2i(p), p2i(heap_oop), p2i(forwarded_oop));

    // If this CAS fails, another thread wrote to p before us; that write is logged in the
    // SATB queue and the reference will be updated later.
    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);

    if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
      return forwarded_oop;
    } else {
      // Note: we used to assert the following here. This doesn't work because sometimes, during
      // marking/updating-refs, it can happen that a Java thread beats us with an arraycopy,
      // which first copies the array, which potentially contains from-space refs, and only afterwards
      // updates all from-space refs to to-space refs, which leaves a short window where the new array
      // elements can be from-space.
      // assert(CompressedOops::is_null(result) ||
      //        oopDesc::unsafe_equals(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
      //        "expect not forwarded");
      return NULL;
    }
  } else {
    shenandoah_assert_not_forwarded(p, heap_oop);
    return heap_oop;
  }
}

inline bool ShenandoahHeap::cancelled_concgc() const {
  return _cancelled_concgc.get() == CANCELLED;
}

inline bool ShenandoahHeap::check_cancelled_concgc_and_yield(bool sts_active) {
  if (! (sts_active && ShenandoahSuspendibleWorkers)) {
    return cancelled_concgc();
  }

  jbyte prev = _cancelled_concgc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {

    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }

    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
    // to restore to CANCELLABLE.
    if (prev == CANCELLABLE) {
      _cancelled_concgc.set(CANCELLABLE);
    }
    return false;
  } else {
    return true;
  }
}
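
// Usage sketch (illustrative only): concurrent GC worker loops are expected to poll this
// check periodically, so that both a pending safepoint and a cancellation request are
// honored promptly. Roughly, with hypothetical helpers for the work loop:
//
//   while (has_more_tasks()) {
//     if (heap->check_cancelled_concgc_and_yield(/* sts_active = */ true)) {
//       return;  // GC was cancelled, bail out of the concurrent phase
//     }
//     process_one_task();
//   }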

inline bool ShenandoahHeap::try_cancel_concgc() {
  while (true) {
    jbyte prev = _cancelled_concgc.cmpxchg(CANCELLED, CANCELLABLE);
    if (prev == CANCELLABLE) return true;
    else if (prev == CANCELLED) return false;
    assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
    assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
    {
      // We need to provide a safepoint here, otherwise we might
      // spin forever if a SP is pending.
      ThreadBlockInVM sp(JavaThread::current());
      SpinPause();
    }
  }
}

inline void ShenandoahHeap::clear_cancelled_concgc() {
  _cancelled_concgc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  if (UseTLAB) {
    if (!thread->gclab().is_initialized()) {
      assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
             "Performance: thread should have GCLAB: %s", thread->name());
      // No GCLABs in this thread, fall back to shared allocation
      return NULL;
    }
    HeapWord* obj = thread->gclab().allocate(size);
    if (obj != NULL) {
      return obj;
    }
    // Otherwise...
    return allocate_from_gclab_slow(thread, size);
  } else {
    return NULL;
  }
}
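
// Illustrative note: the GCLAB path above follows the usual TLAB discipline: try the
// thread-local buffer first, and only take allocate_from_gclab_slow() (which may refill
// the GCLAB or fall back to a shared allocation) when the fast bump-the-pointer
// allocation fails. evacuate_object() below is the primary caller of this fast path.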

inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  size_t size_no_fwdptr = (size_t) p->size();
  size_t size_with_fwdptr = size_no_fwdptr + BrooksPointer::word_size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* filler;
#ifdef ASSERT
  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in ShenandoahOOMDuringEvacHandler");

  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    filler = NULL;
  } else {
#endif
    filler = allocate_from_gclab(thread, size_with_fwdptr);
    if (filler == NULL) {
      filler = allocate_memory(size_with_fwdptr, _alloc_shared_gc);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (filler == NULL) {
    control_thread()->handle_alloc_failure_evac(size_with_fwdptr);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object and initialize its forwarding ptr:
  HeapWord* copy = filler + BrooksPointer::word_size();
  oop copy_val = oop(copy);

  Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
  BrooksPointer::initialize(oop(copy));

  log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT,
                                    p2i(p), p2i(copy));

  // Try to install the new forwarding pointer.
  oop result = BrooksPointer::try_update_forwardee(p, copy_val);

  if (oopDesc::unsafe_equals(result, p)) {
    // Successfully evacuated. Our copy is now the public one!
    log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " succeeded",
                                      p2i(p), p2i(copy));

#ifdef ASSERT
    assert(oopDesc::is_oop(copy_val), "expect oop");
    assert(p->klass() == copy_val->klass(), "Should have the same class p: " PTR_FORMAT ", copy: " PTR_FORMAT,
           p2i(p), p2i(copy));
#endif
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      thread->gclab().rollback(size_with_fwdptr);
    } else {
      fill_with_object(copy, size_no_fwdptr);
    }
    log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " failed, use other: " PTR_FORMAT,
                                      p2i(p), p2i(copy), p2i(result));
    return result;
  }
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  return ! is_marked_next(oop(entry));
}

bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(region_index);
}

bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
  return region_in_collection_set(r->region_number());
}

template <class T>
inline bool ShenandoahHeap::in_collection_set(T p) const {
  HeapWord* obj = (HeapWord*) p;
  assert(collection_set() != NULL, "Sanity");
  assert(is_in(obj), "should be in heap");

  return collection_set()->is_in(obj);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_concurrent_traversal_in_progress() const {
  return _gc_state.is_set(TRAVERSAL);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}

inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
  HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
  return alloc_after_mark_start;
}

inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
  HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
  return alloc_after_mark_start;
}
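
// Illustrative note: objects allocated at or above the top-at-mark-start (TAMS) of their
// region are implicitly live for the cycle in question, which is why both predicates
// above check TAMS before ever consulting a mark bitmap. For example (made-up addresses),
// with a region base at 0x10000000 and its TAMS at 0x10080000, an object at 0x10088000
// is treated as live without touching the bitmap, while an object at 0x10040000 is live
// only if its mark bit is set.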

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit());
}
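
// Usage sketch (illustrative only): adapting an arbitrary ObjectClosure to walk the live
// objects of a single region. `CountLiveClosure` is a hypothetical closure:
//
//   class CountLiveClosure : public ObjectClosure {
//   public:
//     size_t _live;
//     CountLiveClosure() : _live(0) {}
//     void do_object(oop obj) { _live += obj->size(); }
//   };
//
//   CountLiveClosure cnt;
//   heap->marked_object_iterate(region, &cnt);        // walk up to region->top()
//   heap->marked_object_safe_iterate(region, &cnt);   // walk up to the concurrent-iteration safe limit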

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  MarkBitMap* mark_bit_map = _complete_mark_bit_map;
  HeapWord* tams = complete_top_at_mark_start(region->bottom());

  size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + BrooksPointer::word_size();
  HeapWord* end = MIN2(tams + BrooksPointer::word_size(), region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = mark_bit_map->getNextMarkedWordAddress(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched: we need to give
    // the prefetch enough time to work. This is why we try to scan the bitmap linearly,
    // disregarding the object size. However, since we know the forwarding pointer
    // precedes the object, we can skip over it. Once we cannot trust the bitmap,
    // there is no point in prefetching the oop contents, as oop->size() will
    // touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, BrooksPointer::byte_offset());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = oop(slots[c]);
        do_object_marked_complete(cl, obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert (cb < tams, "only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = oop(cb);
      do_object_marked_complete(cl, obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
  HeapWord* cs = tams + BrooksPointer::word_size();
  while (cs < limit) {
    assert (cs > tams, "only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = oop(cs);
    int size = obj->size();
    do_object_marked_complete(cl, obj);
    cs += size + skip_objsize_delta;
  }
}

template<class T>
inline void ShenandoahHeap::do_object_marked_complete(T* cl, oop obj) {
  assert(oopDesc::is_oop(obj), "sanity");
  assert(is_marked_complete(obj), "object expected to be marked");
  cl->do_object(obj);
}

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_oop_iterate(region, cl, region->top());
}
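
// Illustrative note: marked_object_oop_iterate() composes the adapters above with the
// object walk: humongous regions are bounded to their [bottom, top) slice and redirected
// to the humongous start region, while regular regions get every live object iterated in
// full. A usage sketch, with `MyOopClosure cl` standing in for any oop closure:
//
//   heap->marked_object_oop_iterate(region, &cl);        // every live object, up to top()
//   heap->marked_object_oop_safe_iterate(region, &cl);   // bounded by the concurrent-iteration safe limit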

template<class T>
inline void ShenandoahHeap::marked_object_oop_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_oop_iterate(region, cl, region->concurrent_iteration_safe_limit());
}

inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx >= _num_regions) {
    return NULL;
  } else {
    return _regions[region_idx];
  }
}

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP