/*
 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahBrooksPointer.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

template <class T>
void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    _heap->update_with_forwarded_not_null(p, obj);
  }
}

void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
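
// ShenandoahRegionIterator::next() hands out heap regions to multiple
// workers without locking: each caller claims the next region index with an
// atomic fetch-and-add, so every region is returned to exactly one caller,
// and NULL signals that the iterator is exhausted. A typical draining loop
// would look like this (illustrative sketch only; process_region() is a
// placeholder, not an actual method):
//
//   ShenandoahRegionIterator it;
//   for (ShenandoahHeapRegion* r = it.next(); r != NULL; r = it.next()) {
//     process_region(r);  // each region is visited by exactly one worker
//   }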
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  size_t new_index = Atomic::add((size_t) 1, &_index);
  // get_region() provides the bounds-check and returns NULL on OOB.
  return _heap->get_region(new_index - 1);
}

inline bool ShenandoahHeap::has_forwarded_objects() const {
  return _gc_state.is_set(HAS_FORWARDED);
}

inline WorkGang* ShenandoahHeap::workers() const {
  return _workers;
}

inline WorkGang* ShenandoahHeap::get_safepoint_workers() {
  return _safepoint_workers;
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  assert(index < num_regions(), "Region index is in bounds: " PTR_FORMAT, p2i(addr));
  return index;
}

inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* const result = get_region(index);
  assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
  return result;
}

template <class T>
inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress());
    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
    RawAccess<IS_NOT_NULL>::oop_store(p, obj);
  }
#ifdef ASSERT
  else {
    shenandoah_assert_not_forwarded(p, obj);
  }
#endif
  return obj;
}

template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    return maybe_update_with_forwarded_not_null(p, obj);
  } else {
    return NULL;
  }
}

template <class T>
inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop heap_oop = CompressedOops::decode_not_null(o);
    if (in_collection_set(heap_oop)) {
      oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
      if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
        forwarded_oop = evacuate_object(heap_oop, Thread::current());
      }
      oop prev = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
      if (oopDesc::equals_raw(prev, heap_oop)) {
        return forwarded_oop;
      } else {
        return NULL;
      }
    }
    return heap_oop;
  } else {
    return NULL;
  }
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg(n, addr, c);
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = CompressedOops::encode(c);
  narrowOop val = CompressedOops::encode(n);
  return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}
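
// maybe_update_with_forwarded_not_null() implements the heal-on-update
// protocol: if the referent is in the collection set, resolve its forwardee
// and CAS it back into the slot. The CAS may legitimately lose to a mutator
// store; in that case the newly stored value is covered by the SATB barrier,
// so we can give up and return NULL. An illustrative caller (sketch only,
// not the actual closure wiring):
//
//   template <class T>
//   void do_oop_work(T* p) {
//     // NULL result means another thread raced us; SATB handles the rest.
//     _heap->maybe_update_with_forwarded(p);
//   }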
template <class T>
inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
  shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress());
  shenandoah_assert_correct(p, heap_oop);

  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
    if (oopDesc::equals_raw(forwarded_oop, heap_oop)) {
      // E.g. during evacuation.
      return forwarded_oop;
    }

    shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
    shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());

    // If this fails, another thread wrote to p before us; that write will be
    // logged in SATB and the reference will be updated later.
    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);

    if (oopDesc::equals_raw(result, heap_oop)) { // CAS successful.
      return forwarded_oop;
    } else {
      // Note: we used to assert the following here. This doesn't work because sometimes, during
      // marking/updating-refs, it can happen that a Java thread beats us with an arraycopy,
      // which first copies the array, which potentially contains from-space refs, and only afterwards
      // updates all from-space refs to to-space refs, which leaves a short window where the new array
      // elements can be from-space.
      // assert(CompressedOops::is_null(result) ||
      //        oopDesc::equals_raw(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
      //        "expect not forwarded");
      return NULL;
    }
  } else {
    shenandoah_assert_not_forwarded(p, heap_oop);
    return heap_oop;
  }
}

inline bool ShenandoahHeap::cancelled_gc() const {
  return _cancelled_gc.get() == CANCELLED;
}

inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
  if (! (sts_active && ShenandoahSuspendibleWorkers)) {
    return cancelled_gc();
  }

  jbyte prev = _cancelled_gc.cmpxchg(NOT_CANCELLED, CANCELLABLE);
  if (prev == CANCELLABLE || prev == NOT_CANCELLED) {
    if (SuspendibleThreadSet::should_yield()) {
      SuspendibleThreadSet::yield();
    }

    // Back to CANCELLABLE. The thread that poked NOT_CANCELLED first gets
    // to restore to CANCELLABLE.
    if (prev == CANCELLABLE) {
      _cancelled_gc.set(CANCELLABLE);
    }
    return false;
  } else {
    return true;
  }
}

inline void ShenandoahHeap::clear_cancelled_gc() {
  _cancelled_gc.set(CANCELLABLE);
  _oom_evac_handler.clear();
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  assert(UseTLAB, "TLABs should be enabled");

  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab == NULL) {
    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
           "Performance: thread should have GCLAB: %s", thread->name());
    // No GCLABs in this thread, fallback to shared allocation
    return NULL;
  }
  HeapWord* obj = gclab->allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_gclab_slow(thread, size);
}
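
// evacuate_object() copies p into to-space and publishes the copy by CAS-ing
// the Brooks forwarding pointer. In outline (informal summary of the code
// below):
//   1. Allocate space for the copy (GCLAB first, shared GC allocation as
//      fallback), including room for the forwarding pointer word.
//   2. Copy the object payload and initialize the copy's fwdptr.
//   3. CAS p's fwdptr from "self" to the new copy.
//   4. The winner returns its copy; the loser rolls back its GCLAB
//      allocation, or overwrites the stale shared-allocation copy with a
//      filler object.
// If allocation fails entirely, evacuation is abandoned for this cycle and
// the current forwardee is returned instead.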
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
    // This thread went through the OOM during evac protocol and it is safe to return
    // the forward pointer. It must not attempt to evacuate any more.
    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");

  size_t size_no_fwdptr = (size_t) p->size();
  size_t size_with_fwdptr = size_no_fwdptr + ShenandoahBrooksPointer::word_size();

  assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* filler = NULL;

#ifdef ASSERT
  if (ShenandoahOOMDuringEvacALot &&
      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
    filler = NULL;
  } else {
#endif
    if (UseTLAB) {
      filler = allocate_from_gclab(thread, size_with_fwdptr);
    }
    if (filler == NULL) {
      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size_with_fwdptr);
      filler = allocate_memory(req);
      alloc_from_gclab = false;
    }
#ifdef ASSERT
  }
#endif

  if (filler == NULL) {
    control_thread()->handle_alloc_failure_evac(size_with_fwdptr);

    _oom_evac_handler.handle_out_of_memory_during_evacuation();

    return ShenandoahBarrierSet::resolve_forwarded(p);
  }

  // Copy the object and initialize its forwarding ptr:
  HeapWord* copy = filler + ShenandoahBrooksPointer::word_size();
  oop copy_val = oop(copy);

  Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
  ShenandoahBrooksPointer::initialize(oop(copy));

  // Try to install the new forwarding pointer.
  oop result = ShenandoahBrooksPointer::try_update_forwardee(p, copy_val);

  if (oopDesc::equals_raw(result, p)) {
    // Successfully evacuated. Our copy is now the public one!
    shenandoah_assert_correct(NULL, copy_val);
    return copy_val;
  } else {
    // Failed to evacuate. We need to deal with the object that is left behind. Since this
    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
    // But if it happens to contain references to evacuated regions, those references would
    // not get updated for this stale copy during this cycle, and we will crash while scanning
    // it the next cycle.
    //
    // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
    // object will overwrite this stale copy, or the filler object on LAB retirement will
    // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
    // have to explicitly overwrite the copy with the filler object. With that overwrite,
    // we have to keep the fwdptr initialized and pointing to our (stale) copy.
    if (alloc_from_gclab) {
      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(filler, size_with_fwdptr);
    } else {
      fill_with_object(copy, size_no_fwdptr);
    }
    shenandoah_assert_correct(NULL, copy_val);
    shenandoah_assert_correct(NULL, result);
    return result;
  }
}
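
// Race outcome, informally: when two threads evacuate the same object
// concurrently, both make private copies, but exactly one fwdptr CAS above
// succeeds. The loser's `result` is the winner's copy (the value the CAS
// observed in the fwdptr), so both callers return the same to-space object.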

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  return !_marking_context->is_marked(oop(entry));
}

template <class T>
inline bool ShenandoahHeap::in_collection_set(T p) const {
  HeapWord* obj = (HeapWord*) p;
  assert(collection_set() != NULL, "Sanity");
  assert(is_in(obj), "should be in heap");

  return collection_set()->is_in(obj);
}

inline bool ShenandoahHeap::is_stable() const {
  return _gc_state.is_clear();
}

inline bool ShenandoahHeap::is_idle() const {
  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL);
}

inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
  return _gc_state.is_set(MARKING);
}

inline bool ShenandoahHeap::is_concurrent_traversal_in_progress() const {
  return _gc_state.is_set(TRAVERSAL);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _gc_state.is_set(EVACUATION);
}

inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
  return _gc_state.is_set(mask);
}

inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
  return _degenerated_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress.is_set();
}

inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
  return _full_gc_move_in_progress.is_set();
}

inline bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _gc_state.is_set(UPDATEREFS);
}
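
// marked_object_iterate() visits all live objects in a region in two steps:
// objects below TAMS are found via the mark bitmap; objects above TAMS are
// walked by size, since everything allocated after mark start is implicitly
// live. Because the Brooks forwarding pointer occupies the word before each
// object, scan positions are offset by ShenandoahBrooksPointer::word_size().
// For example (illustrative, assuming a one-word fwdptr): skip_bitmap_delta
// is 1 + 1 = 2 words, i.e. the scan advances past the fwdptr plus at least
// one object word before re-querying the bitmap for the next marked address.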
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(ShenandoahBrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  ShenandoahMarkingContext* const ctx = complete_marking_context();
  assert(ctx->is_complete(), "sanity");

  MarkBitMap* mark_bit_map = ctx->mark_bit_map();
  HeapWord* tams = ctx->top_at_mark_start(region);

  size_t skip_bitmap_delta = ShenandoahBrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = ShenandoahBrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + ShenandoahBrooksPointer::word_size();
  HeapWord* end = MIN2(tams + ShenandoahBrooksPointer::word_size(), region->end());

  // Step 1. Scan below the TAMS based on bitmap data.
  HeapWord* limit_bitmap = MIN2(limit, tams);

  // Try to scan the initial candidate. If the candidate is above the TAMS, it would
  // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
  HeapWord* cb = mark_bit_map->get_next_marked_addr(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to work. This is why we try to scan the bitmap
    // linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we
    // cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() would touch it prematurely.

    // No variable-length arrays in standard C++; have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    HeapWord* slots[SLOT_COUNT];

    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) {
        Prefetch::read(cb, ShenandoahBrooksPointer::byte_offset());
        slots[avail++] = cb;
        cb += skip_bitmap_delta;
        if (cb < limit_bitmap) {
          cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
        }
      }

      for (int c = 0; c < avail; c++) {
        assert(slots[c] < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams));
        assert(slots[c] < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit));
        oop obj = oop(slots[c]);
        assert(oopDesc::is_oop(obj), "sanity");
        assert(ctx->is_marked(obj), "object expected to be marked");
        cl->do_object(obj);
      }
    } while (avail > 0);
  } else {
    while (cb < limit_bitmap) {
      assert(cb < tams,  "only objects below TAMS here: "  PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams));
      assert(cb < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit));
      oop obj = oop(cb);
      assert(oopDesc::is_oop(obj), "sanity");
      assert(ctx->is_marked(obj), "object expected to be marked");
      cl->do_object(obj);
      cb += skip_bitmap_delta;
      if (cb < limit_bitmap) {
        cb = mark_bit_map->get_next_marked_addr(cb, limit_bitmap);
      }
    }
  }

  // Step 2. Accurate size-based traversal, happens past the TAMS.
  // This restarts the scan at TAMS, which makes sure we traverse all objects,
  // regardless of what happened at Step 1.
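  // The scan pointer starts one fwdptr width past TAMS (at the first object
  // header above TAMS) and advances by obj->size() plus the fwdptr width,
  // which lands it exactly on the next object header.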
  HeapWord* cs = tams + ShenandoahBrooksPointer::word_size();
  while (cs < limit) {
    assert(cs > tams,  "only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams));
    assert(cs < limit, "only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit));
    oop obj = oop(cs);
    assert(oopDesc::is_oop(obj), "sanity");
    assert(ctx->is_marked(obj), "object expected to be marked");
    int size = obj->size();
    cl->do_object(obj);
    cs += size + skip_objsize_delta;
  }
}

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template <class T>
class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
  T* _cl;
  MemRegion _bounds;
public:
  ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
    _cl(cl), _bounds(bottom, top) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl, _bounds);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
  if (region->is_humongous()) {
    HeapWord* bottom = region->bottom();
    if (top > bottom) {
      region = region->humongous_start_region();
      ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
      marked_object_iterate(region, &objs);
    }
  } else {
    ShenandoahObjectToOopClosure<T> objs(cl);
    marked_object_iterate(region, &objs, top);
  }
}

inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
  if (region_idx < _num_regions) {
    return _regions[region_idx];
  } else {
    return NULL;
  }
}

inline void ShenandoahHeap::mark_complete_marking_context() {
  _marking_context->mark_complete();
}

inline void ShenandoahHeap::mark_incomplete_marking_context() {
  _marking_context->mark_incomplete();
}

inline ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const {
  assert(_marking_context->is_complete(), "sanity");
  return _marking_context;
}

inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
  return _marking_context;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP