/*
 * Copyright (c) 2015, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "classfile/javaClasses.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shenandoah/brooksPointer.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"

template <class T>
void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    _heap->update_oop_ref_not_null(p, obj);
  }
}

void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }

/*
 * Marks the object. Returns true if the object has not been marked before and has
 * been marked by this thread. Returns false if the object has already been marked,
 * or if a competing thread succeeded in marking this object.
 */
inline bool ShenandoahHeap::mark_next(oop obj) const {
#ifdef ASSERT
  if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
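    // The object is still forwarded: dump the regions holding the object and
    // its forwardee to aid diagnosis, right before the assert below fires.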
    tty->print_cr("heap region containing obj:");
    ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
    obj_region->print();
    tty->print_cr("heap region containing forwardee:");
    ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
    forward_region->print();
  }
#endif

  assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
  return mark_next_no_checks(obj);
}

inline bool ShenandoahHeap::mark_next_no_checks(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return (! allocated_after_next_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
}

inline bool ShenandoahHeap::is_marked_next(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return allocated_after_next_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
}

inline bool ShenandoahHeap::is_marked_complete(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return allocated_after_complete_mark_start(addr) || _complete_mark_bit_map->isMarked(addr);
}

inline bool ShenandoahHeap::need_update_refs() const {
  return _need_update_refs;
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
#ifdef ASSERT
  if (index >= num_regions()) {
    tty->print_cr("heap region does not contain address, heap base: " PTR_FORMAT
                  ", real bottom of first region: " PTR_FORMAT ", num_regions: " SIZE_FORMAT ", region_size: " SIZE_FORMAT,
                  p2i(base()),
                  p2i(_ordered_regions->get(0)->bottom()),
                  num_regions(),
                  ShenandoahHeapRegion::region_size_bytes());
  }
#endif
  assert(index < num_regions(), "heap region index must be in range");
  return index;
}

inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* result = _ordered_regions->get(index);
#ifdef ASSERT
  if (!(addr >= result->bottom() && addr < result->end())) {
    tty->print_cr("heap region does not contain address, heap base: " PTR_FORMAT
                  ", real bottom of first region: " PTR_FORMAT ", num_regions: " SIZE_FORMAT,
                  p2i(base()),
                  p2i(_ordered_regions->get(0)->bottom()),
                  num_regions());
  }
#endif
  assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
  return result;
}

template <class T>
inline oop ShenandoahHeap::update_oop_ref_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
    assert(! oopDesc::unsafe_equals(forw, obj) || is_full_gc_in_progress() || cancelled_concgc(), "expect forwarded object");
    obj = forw;
    oopDesc::encode_store_heap_oop(p, obj);
  }
#ifdef ASSERT
  else {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "expect not forwarded");
  }
#endif
  return obj;
}
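
// Loads the reference at p and, if it is non-NULL and points into the collection
// set, updates it to point to the to-space copy when one exists. Returns the
// (possibly updated) referent, or NULL if the slot was NULL or a competing
// update won the race.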
template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    return maybe_update_oop_ref_not_null(p, obj);
  } else {
    return NULL;
  }
}

template <class T>
inline oop ShenandoahHeap::evac_update_oop_ref(T* p, bool& evac) {
  evac = false;
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop heap_oop = oopDesc::decode_heap_oop_not_null(o);
    if (in_collection_set(heap_oop)) {
      oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
      if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) {
        forwarded_oop = evacuate_object(heap_oop, Thread::current(), evac);
      }
      oop prev = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
      if (prev == heap_oop) {
        return forwarded_oop;
      } else {
        return NULL;
      }
    }
    return heap_oop;
  } else {
    return NULL;
  }
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg_ptr(n, addr, c);
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = oopDesc::encode_heap_oop(c);
  narrowOop val = oopDesc::encode_heap_oop(n);
  return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}

template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref_not_null(T* p, oop heap_oop) {

  assert((! is_in(p)) || (! in_collection_set(p))
         || is_full_gc_in_progress(),
         "never update refs in from-space, unless evacuation has been cancelled");

#ifdef ASSERT
  if (! is_in(heap_oop)) {
    print_heap_regions_on(tty);
    tty->print_cr("object not in heap: " PTR_FORMAT ", referenced by: " PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
    assert(is_in(heap_oop), "object must be in heap");
  }
#endif
  assert(is_in(heap_oop), "only ever call this on objects in the heap");
  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
    if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) {
      // E.g. during evacuation.
      return forwarded_oop;
    }

    assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");

    log_develop_trace(gc)("Updating old ref: " PTR_FORMAT " pointing to " PTR_FORMAT " to new ref: " PTR_FORMAT,
                          p2i(p), p2i(heap_oop), p2i(forwarded_oop));

    assert(oopDesc::is_oop(forwarded_oop), "oop required");
    assert(is_in(forwarded_oop), "forwardee must be in heap");
    assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
    // If this CAS fails, another thread wrote to p before us; that write will be
    // logged in SATB, and the reference will be updated later.
    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);

    if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
      return forwarded_oop;
    } else {
      // Note: we used to assert the following here. This does not work, because during
      // marking/updating-refs a Java thread can beat us with an arraycopy that first
      // copies the array, which potentially contains from-space refs, and only afterwards
      // updates all from-space refs to to-space refs, which leaves a short window where
      // the new array elements can be from-space.
      // assert(oopDesc::is_null(result) ||
      //        oopDesc::unsafe_equals(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
      //        "expect not forwarded");
      return NULL;
    }
  } else {
    assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)),
           "expect not forwarded");
    return heap_oop;
  }
}
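
// Concurrent GC cancellation is driven by a small state machine over
// _cancelled_concgc: CANCELLABLE (GC is running and may be cancelled),
// NOT_CANCELLED (a suspendible worker is currently yielding, so cancellation
// has to wait), and CANCELLED (cancellation has been requested). The helpers
// below read and transition that state.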
inline bool ShenandoahHeap::cancelled_concgc() const {
  return OrderAccess::load_acquire((jbyte*) &_cancelled_concgc) == CANCELLED;
}

inline bool ShenandoahHeap::check_cancelled_concgc_and_yield(bool sts_active) {
  if (! (sts_active && ShenandoahSuspendibleWorkers)) {
    return cancelled_concgc();
  }
  jbyte prev = Atomic::cmpxchg((jbyte)NOT_CANCELLED, &_cancelled_concgc, (jbyte)CANCELLABLE);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {

    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }

    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
    // to restore to CANCELLABLE.
    if (prev == CANCELLABLE) {
      OrderAccess::release_store_fence(&_cancelled_concgc, CANCELLABLE);
    }
    return false;
  } else {
    return true;
  }
}

inline bool ShenandoahHeap::try_cancel_concgc() {
  while (true) {
    jbyte prev = Atomic::cmpxchg((jbyte)CANCELLED, &_cancelled_concgc, (jbyte)CANCELLABLE);
    if (prev == CANCELLABLE) return true;
    else if (prev == CANCELLED) return false;
    assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
    assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
    {
      // We need to provide a safepoint here, otherwise we might
      // spin forever if a safepoint is pending.
      ThreadBlockInVM sp(JavaThread::current());
      SpinPause();
    }
  }
}

inline void ShenandoahHeap::clear_cancelled_concgc() {
  OrderAccess::release_store_fence(&_cancelled_concgc, CANCELLABLE);
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  if (UseTLAB) {
    if (!thread->gclab().is_initialized()) {
      assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
             "Performance: thread should have GCLAB: %s", thread->name());
      // No GCLABs in this thread, fall back to shared allocation
      return NULL;
    }
    HeapWord* obj = thread->gclab().allocate(size);
    if (obj != NULL) {
      return obj;
    }
    // Otherwise...
    return allocate_from_gclab_slow(thread, size);
  } else {
    return NULL;
  }
}
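
// Evacuates the object into to-space: allocates room for a copy (in the GCLAB
// if possible, via a shared GC allocation otherwise), copies the contents, and
// races to install the forwarding pointer. Returns the winning copy, and sets
// evacuated to true only if our own copy won the race.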
template <class T>
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread, bool& evacuated) {
  evacuated = false;

  size_t size_no_fwdptr = (size_t) p->size();
  size_t size_with_fwdptr = size_no_fwdptr + BrooksPointer::word_size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* filler = allocate_from_gclab(thread, size_with_fwdptr);
  if (filler == NULL) {
    filler = allocate_memory(size_with_fwdptr, _alloc_shared_gc);
    alloc_from_gclab = false;
  }

#ifdef ASSERT
  // Check that the current Java thread does not hold Threads_lock when we get here.
  // If that is ever the case, we would deadlock in oom_during_evacuation.
  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    assert(! Threads_lock->owned_by_self()
           || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
  }
#endif

  if (filler == NULL) {
    oom_during_evacuation();
    // If this is a Java thread, it should have waited
    // until all GC threads are done, and then we
    // return the forwardee.
    oop resolved = ShenandoahBarrierSet::resolve_oop_static(p);
    return resolved;
  }

  // Copy the object and initialize its forwarding ptr:
  HeapWord* copy = filler + BrooksPointer::word_size();
  oop copy_val = oop(copy);

  Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
  BrooksPointer::initialize(oop(copy));

  log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT,
                                    p2i(p), p2i(copy));
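
  // At this point the copy is complete but not yet published: other threads
  // can only see it once the fwdptr CAS below succeeds.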

  // String dedup support
  bool need_str_dedup = false;
  if (ShenandoahStringDedup::is_enabled()
      && java_lang_String::is_instance_inlined(copy_val)) {
    // We need to increase the age before the CAS to avoid a race condition:
    // once the new copy is published, other threads may set its hash code,
    // perform locking, etc., which would race with age bits manipulation.
    copy_val->incr_age();

    need_str_dedup = ShenandoahStringDedup::is_candidate(copy_val);
  }

  // Try to install the new forwarding pointer.
  oop result = BrooksPointer::try_update_forwardee(p, copy_val);

  if (oopDesc::unsafe_equals(result, p)) {
    // Successfully evacuated. Our copy is now the public one!
    evacuated = true;
    log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " succeeded",
                                      p2i(p), p2i(copy));

    // Only dedup evacuated strings
    if (need_str_dedup) {
      // Shenandoah evacuates objects both inside and outside of GC safepoints.
      // But the string dedup protocol requires deduplication outside of GC safepoints,
      // so we need to queue candidates during GC safepoints.
      // SafepointSynchronize::is_at_safepoint() is not sufficient, because generic safepoints
      // that might happen during concurrent evacuation do not suspend the deduplication thread,
      // so pushes to the dedup queue are unsafe then.
      if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
        assert(!is_full_gc_in_progress(), "Should not get here");
        assert(thread->is_Worker_thread(), "Must be a worker thread during a safepoint");
        // Use the worker thread id instead of worker_id to avoid passing down worker_id.
        // This may cause imbalance among the queues, but it is okay, since deduplication is
        // single threaded.
        ShenandoahStringDedup::enqueue_from_safepoint(copy_val, thread->as_Worker_thread()->id());
      } else {
        ShenandoahStringDedup::deduplicate(copy_val);
      }
    }

#ifdef ASSERT
    assert(oopDesc::is_oop(copy_val), "expect oop");
    assert(p->klass() == copy_val->klass(), "Should have the same class p: " PTR_FORMAT ", copy: " PTR_FORMAT,
           p2i(p), p2i(copy));
#endif
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      thread->gclab().rollback(size_with_fwdptr);
    } else {
      fill_with_object(copy, size_no_fwdptr);
    }
    log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " failed, use other: " PTR_FORMAT,
                                      p2i(p), p2i(copy), p2i(result));
    return result;
  }
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  // TODO: Make this faster! It's used in a hot path.
  // TODO: it's not strictly matrix-related, but used only in partial (i.e. matrix) GCs.
  if (is_concurrent_partial_in_progress()) {
    assert(! in_collection_set((oop) entry), "must not get cset objects here");
    // assert(free_regions()->contains(heap_region_containing(entry)), "expect to-space object");
    return true;
  } else if (concurrent_mark_in_progress()) {
    return ! is_marked_next(oop(entry));
  } else {
    return false;
  }
}

bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(region_index);
}

bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
  return region_in_collection_set(r->region_number());
}

template <class T>
inline bool ShenandoahHeap::in_collection_set(T p) const {
  HeapWord* obj = (HeapWord*) p;
  assert(collection_set() != NULL, "Sanity");
  assert(is_in(obj), "should be in heap");

  return collection_set()->is_in(obj);
}

inline bool ShenandoahHeap::concurrent_mark_in_progress() const {
  return _concurrent_mark_in_progress != 0;
}

inline bool ShenandoahHeap::is_concurrent_partial_in_progress() const {
  return _concurrent_partial_in_progress;
}

inline address ShenandoahHeap::update_refs_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_update_refs_in_progress);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _evacuation_in_progress != 0;
}

inline address ShenandoahHeap::evacuation_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_evacuation_in_progress);
}

inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
  HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
  return alloc_after_mark_start;
}

inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
  HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
  return alloc_after_mark_start;
}
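
// Marked-object iteration: objects below the TAMS are walked via the complete
// mark bitmap (Step 1 below), while objects allocated at or above the TAMS are
// implicitly live and walked by object size (Step 2 below).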
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  MarkBitMap* mark_bit_map = _complete_mark_bit_map;
  HeapWord* tams = complete_top_at_mark_start(region->bottom());

  size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + BrooksPointer::word_size();
  HeapWord* end = MIN2(tams + BrooksPointer::word_size(), region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = mark_bit_map->getNextMarkedWordAddress(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to work. This is why we try to scan the bitmap
    // linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we
    // cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, BrooksPointer::byte_offset());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert (slots[c] < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert (slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = oop(slots[c]);
        do_marked_object(mark_bit_map, cl, obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert (cb < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert (cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = oop(cb);
      do_marked_object(mark_bit_map, cl, obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
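  // The object at TAMS carries its fwdptr first, so the first object body
  // starts one fwdptr-width past the TAMS.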
  HeapWord* cs = tams + BrooksPointer::word_size();
  while (cs < limit) {
    assert (cs > tams,  "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert (cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = oop(cs);
    int size = obj->size();
    do_marked_object(mark_bit_map, cl, obj);
    cs += size + skip_objsize_delta;
  }
}

template<class T>
inline void ShenandoahHeap::do_marked_object(MarkBitMap* bitmap, T* cl, oop obj) {
  assert(!oopDesc::is_null(obj), "sanity");
  assert(oopDesc::is_oop(obj), "sanity");
  assert(is_in(obj), "sanity");
  assert(bitmap == _complete_mark_bit_map, "only iterate completed mark bitmap");
  assert(is_marked_complete(obj), "object expected to be marked");
  cl->do_object(obj);
}

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_oop_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_oop_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_oop_iterate(region, cl, region->concurrent_iteration_safe_limit());
}

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP