/*
 * Copyright (c) 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shared/cmBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shenandoah/brooksPointer.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/copy.hpp"

template <class T>
void SCMUpdateRefsClosure::do_oop_work(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    _heap->update_oop_ref_not_null(p, obj);
  }
}

void SCMUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
void SCMUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }

/*
 * Marks the object. Returns true if the object has not been marked before and has
 * been marked by this thread. Returns false if the object has already been marked,
 * or if a competing thread succeeded in marking this object.
 */
inline bool ShenandoahHeap::mark_next(oop obj) const {
#ifdef ASSERT
  if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
    tty->print_cr("heap region containing obj:");
    ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
    obj_region->print();
    tty->print_cr("heap region containing forwardee:");
    ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
    forward_region->print();
  }
#endif

  assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
  return mark_next_no_checks(obj);
}

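// Fast-path marking without the forwarding-pointer sanity checks above.
// Objects allocated after the next-bitmap top-at-mark-start (TAMS) are
// treated as implicitly marked and are never set in the bitmap itself;
// everything else is marked with a parallel-safe (atomic) bitmap update.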
inline bool ShenandoahHeap::mark_next_no_checks(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return (! allocated_after_next_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
}

inline bool ShenandoahHeap::is_marked_next(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return allocated_after_next_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
}

inline bool ShenandoahHeap::is_marked_complete(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return allocated_after_complete_mark_start(addr) || _complete_mark_bit_map->isMarked(addr);
}

inline bool ShenandoahHeap::need_update_refs() const {
  return _need_update_refs;
}

inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) _first_region_bottom) >> ShenandoahHeapRegion::region_size_shift();
#ifdef ASSERT
  if (!(index < _num_regions)) {
    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT \
                  ", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT,
                  p2i(_first_region_bottom),
                  p2i(_ordered_regions->get(0)->bottom()),
                  _num_regions,
                  ShenandoahHeapRegion::region_size_bytes());
  }
#endif
  assert(index < _num_regions, "heap region index must be in range");
  return index;
}

inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* result = _ordered_regions->get(index);
#ifdef ASSERT
  if (!(addr >= result->bottom() && addr < result->end())) {
    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT \
                  ", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT,
                  p2i(_first_region_bottom),
                  p2i(_ordered_regions->get(0)->bottom()),
                  _num_regions);
  }
#endif
  assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
  return result;
}

template <class T>
inline oop ShenandoahHeap::update_oop_ref_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
    assert(! oopDesc::unsafe_equals(forw, obj) || is_full_gc_in_progress() || cancelled_concgc(), "expect forwarded object");
    obj = forw;
    oopDesc::encode_store_heap_oop(p, obj);
  }
#ifdef ASSERT
  else {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "expect not forwarded");
  }
#endif
  return obj;
}

template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    return maybe_update_oop_ref_not_null(p, obj);
  } else {
    return NULL;
  }
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg_ptr(n, addr, c);
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = oopDesc::encode_heap_oop(c);
  narrowOop val = oopDesc::encode_heap_oop(n);
  return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}

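// If the reference at p points into the collection set, attempt to CAS it to
// the forwardee read from the Brooks pointer. Returns the object the caller
// should use: the forwardee on a successful update, the referent itself when
// it is not in the collection set (or has no copy yet), or NULL when a
// competing thread updated the reference first.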
template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref_not_null(T* p, oop heap_oop) {

  assert((! is_in(p)) || (! in_collection_set(p))
         || is_full_gc_in_progress(),
         "never update refs in from-space, unless evacuation has been cancelled");

#ifdef ASSERT
  if (! is_in(heap_oop)) {
    print_heap_regions();
    tty->print_cr("object not in heap: "PTR_FORMAT", referenced by: "PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
    assert(is_in(heap_oop), "object must be in heap");
  }
#endif
  assert(is_in(heap_oop), "only ever call this on objects in the heap");
  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
    if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) {
      // E.g. during evacuation.
      return forwarded_oop;
    }

    assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");

    log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT,
                          p2i(p), p2i(heap_oop), p2i(forwarded_oop));

    assert(forwarded_oop->is_oop(), "oop required");
    assert(is_in(forwarded_oop), "forwardee must be in heap");
    assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
    // If this fails, another thread wrote to p before us; that write will be
    // logged in SATB and the reference will be updated later.
    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);

    if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
      return forwarded_oop;
    } else {
      assert(oopDesc::is_null(result) ||
             oopDesc::unsafe_equals(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
             "expect not forwarded");
      return NULL;
    }
  } else {
    assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)),
           "expect not forwarded");
    return heap_oop;
  }
}

inline bool ShenandoahHeap::cancelled_concgc() const {
  return OrderAccess::load_acquire((jbyte*) &_cancelled_concgc) == 1;
}

inline bool ShenandoahHeap::try_cancel_concgc() {
  return Atomic::cmpxchg(1, &_cancelled_concgc, 0) == 0;
}

inline void ShenandoahHeap::clear_cancelled_concgc() {
  OrderAccess::release_store_fence(&_cancelled_concgc, 0);
}

inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  if (UseTLAB) {
    HeapWord* obj = thread->gclab().allocate(size);
    if (obj != NULL) {
      return obj;
    }
    // Otherwise...
    return allocate_from_gclab_slow(thread, size);
  } else {
    return NULL;
  }
}

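// Visits the reference fields of an object and records each location/target
// pair as a connection in the connection matrix (see its use under
// UseShenandoahMatrix in evacuate_object() below).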
class UpdateMatrixClosure : public ExtendedOopClosure {

private:
  ShenandoahHeap* _heap;
  ShenandoahConnectionMatrix* _matrix;

  template <class T>
  inline void do_oop_nv(T* o) {
    T t = oopDesc::load_heap_oop(o);
    if (! oopDesc::is_null(t)) {
      oop obj = oopDesc::decode_heap_oop_not_null(t);
      _matrix->set_connected(o, obj);
    }
  }

public:

  UpdateMatrixClosure() :
    _heap(ShenandoahHeap::heap()),
    _matrix(ShenandoahHeap::heap()->connection_matrix()) {
  }

  void do_oop(oop* o) {
    do_oop_nv(o);
  }

  void do_oop(narrowOop* o) {
    do_oop_nv(o);
  }
};

inline void ShenandoahHeap::copy_object(oop p, HeapWord* s, size_t words) {
  assert(s != NULL, "allocation of brooks pointer must not fail");
  HeapWord* copy = s + BrooksPointer::word_size();

  guarantee(copy != NULL, "allocation of copy object must not fail");
  Copy::aligned_disjoint_words((HeapWord*) p, copy, words);
  BrooksPointer::initialize(oop(copy));

  log_develop_trace(gc, compaction)("copy object from "PTR_FORMAT" to: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy));
}

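// Evacuates object p into a GCLAB (or a shared allocation when the GCLAB
// cannot satisfy the request) and races to install the copy via a CAS on the
// Brooks forwarding pointer. Exactly one thread wins; losers roll back their
// GCLAB allocation (when applicable) and return the winning copy.
// 'evacuated' is set to true only for the winner.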
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread, bool& evacuated) {
  size_t required;

  evacuated = false;

#ifdef ASSERT
  ShenandoahHeapRegion* hr = NULL;
  if (ShenandoahVerifyReadsToFromSpace) {
    hr = heap_region_containing(p);
    {
      hr->memProtectionOff();
      required = BrooksPointer::word_size() + p->size();
      hr->memProtectionOn();
    }
  } else {
    required = BrooksPointer::word_size() + p->size();
  }
#else
  required = BrooksPointer::word_size() + p->size();
#endif

  assert(! heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* filler = allocate_from_gclab(thread, required);
  if (filler == NULL) {
    filler = allocate_memory(required, true);
    alloc_from_gclab = false;
  }

#ifdef ASSERT
  // Check that the current Java thread does not hold Threads_lock when we get here.
  // If that were ever the case, we would deadlock in oom_during_evacuation.
  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    assert(! Threads_lock->owned_by_self()
           || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
  }
#endif

  if (filler == NULL) {
    oom_during_evacuation();
    // If this is a Java thread, it should have waited until all GC threads
    // are done, so we can return the forwardee.
    oop resolved = ShenandoahBarrierSet::resolve_oop_static(p);
    return resolved;
  }

  HeapWord* copy = filler + BrooksPointer::word_size();

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    hr->memProtectionOff();
    copy_object(p, filler, required - BrooksPointer::word_size());
    hr->memProtectionOn();
  } else {
    copy_object(p, filler, required - BrooksPointer::word_size());
  }
#else
  copy_object(p, filler, required - BrooksPointer::word_size());
#endif

  oop copy_val = oop(copy);
  oop result = BrooksPointer::try_update_forwardee(p, copy_val);

  oop return_val;
  if (oopDesc::unsafe_equals(result, p)) {
    evacuated = true;
    return_val = copy_val;

    log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" succeeded \n",
                                      p2i((HeapWord*) p), p2i(copy));

#ifdef ASSERT
    assert(return_val->is_oop(), "expect oop");
    assert(p->klass() == return_val->klass(), "Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT,
           p2i((HeapWord*) p), p2i((HeapWord*) copy));

    if (UseShenandoahMatrix) {
      UpdateMatrixClosure cl;
      copy_val->oop_iterate(&cl);
    }
#endif
  } else {
    if (alloc_from_gclab) {
      thread->gclab().rollback(required);
    }
    log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" failed, use other: "PTR_FORMAT,
                                      p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
    return_val = result;
  }

  return return_val;
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  return ! is_marked_next(oop(entry));
}

bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
  return _in_cset_fast_test_base[region_index];
}

bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
  return region_in_collection_set(r->region_number());
}

template <class T>
inline bool ShenandoahHeap::in_collection_set(T p) const {
  HeapWord* obj = (HeapWord*) p;
  assert(_in_cset_fast_test != NULL, "sanity");
  assert(is_in(obj), "should be in heap");

  // No need to subtract the bottom of the heap from obj,
  // _in_cset_fast_test is biased.
  uintx index = ((uintx) obj) >> ShenandoahHeapRegion::region_size_shift();
  return _in_cset_fast_test[index];
}

inline bool ShenandoahHeap::concurrent_mark_in_progress() {
  return _concurrent_mark_in_progress != 0;
}

inline address ShenandoahHeap::concurrent_mark_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() {
  return _evacuation_in_progress != 0;
}

inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
  HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
  return alloc_after_mark_start;
}

inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
  HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
  return alloc_after_mark_start;
}

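// Applies cl to every live object in the region: objects marked in the
// complete mark bitmap below the region's top-at-mark-start, plus all objects
// above it, which are implicitly live. The overload without a limit scans up
// to the region's current top.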
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");

  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  CMBitMap* mark_bit_map = _complete_mark_bit_map;
  HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());

  size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + BrooksPointer::word_size();

  HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
  HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either the header, an oop field, or the forwarding pointer. Note that we
    // cannot touch anything in the oop while it is still being prefetched, to
    // give the prefetch enough time to work. This is why we try to scan the
    // bitmap linearly, disregarding the object size. However, since we know
    // the forwarding pointer precedes the object, we can skip over it. Once we
    // cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() would touch it prematurely.

    // No variable-length arrays in standard C++; have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    oop slots[SLOT_COUNT];

    bool aborting = false;
    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (addr < limit); c++) {
        Prefetch::read(addr, 1);
        oop obj = oop(addr);
        slots[avail++] = obj;
        if (addr < top_at_mark_start) {
          addr += skip_bitmap_delta;
          addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
        } else {
          // cannot trust mark bitmap anymore, finish the current stride,
          // and switch to accurate traversal
          addr += obj->size() + skip_objsize_delta;
          aborting = true;
        }
      }

      for (int c = 0; c < avail; c++) {
        do_marked_object(mark_bit_map, cl, slots[c]);
      }
    } while (avail > 0 && !aborting);

    // accurate traversal
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
    }
  } else {
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
      if (addr < top_at_mark_start) {
        addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
      }
    }
  }
}

template<class T>
inline void ShenandoahHeap::do_marked_object(CMBitMap* bitmap, T* cl, oop obj) {
#ifdef ASSERT
  assert(!oopDesc::is_null(obj), "sanity");
  assert(obj->is_oop(), "sanity");
  assert(is_in(obj), "sanity");
  assert(bitmap == _complete_mark_bit_map, "only iterate completed mark bitmap");
  assert(is_marked_complete(obj), "object expected to be marked");
#endif
  cl->do_object(obj);
}

template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl) {
  ShenandoahObjectToOopClosure<T> objs(cl);
  marked_object_iterate(region, &objs);
}

template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  ShenandoahObjectToOopClosure<T> objs(cl);
  marked_object_iterate(region, &objs, limit);
}

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP