
src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp

rev 10496 : [backport] Rename "cancel_concgc" to "cancel_gc"
rev 10538 : [backport] VSC++ requires space(s) in between two string literals
rev 10564 : [backport] Refactor allocation path to accept ShenandoahAllocRequest tuple
rev 10577 : [backport] -XX:-UseTLAB should disable GCLABs too
rev 10580 : [backport] Refactor to group marking bitmap and TAMS structure in one class ShenandoahMarkingContext
rev 10594 : [backport] Split write barrier paths for mutator and GC workers
rev 10613 : [backport] Remove obsolete/unused logging usages
rev 10614 : [backport] Replace custom asserts with shenandoah_assert_*
rev 10628 : [backport] Degenerated evacuation


  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "gc_implementation/shared/markBitMap.inline.hpp"
  28 #include "memory/threadLocalAllocBuffer.inline.hpp"
  29 #include "gc_implementation/shenandoah/brooksPointer.inline.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahAsserts.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp"
  34 #include "gc_implementation/shenandoah/shenandoahControlThread.hpp"

  35 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
  37 #include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp"
  38 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/atomic.hpp"
  41 #include "runtime/prefetch.hpp"
  42 #include "runtime/prefetch.inline.hpp"
  43 #include "utilities/copy.hpp"

  44 
  45 template <class T>
  46 void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
  47   T o = oopDesc::load_heap_oop(p);
  48   if (! oopDesc::is_null(o)) {
  49     oop obj = oopDesc::decode_heap_oop_not_null(o);
  50     _heap->update_with_forwarded_not_null(p, obj);
  51   }
  52 }
  53 
  54 void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
  55 void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
  56 
  57 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  58   size_t new_index = Atomic::add((size_t) 1, &_index);
  59   // get_region() provides the bounds-check and returns NULL on OOB.
  60   return _heap->get_region(new_index - 1);
  61 }
  62 
  63 /*
  64  * Marks the object. Returns true if the object had not been marked before and
  65  * was marked by this thread. Returns false if the object was already marked,
  66  * or if a competing thread succeeded in marking it first.
  67  */
  68 inline bool ShenandoahHeap::mark_next(oop obj) const {
  69   shenandoah_assert_not_forwarded(NULL, obj);
  70   HeapWord* addr = (HeapWord*) obj;
  71   return (! allocated_after_next_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
  72 }
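
For orientation, a minimal sketch of how mark_next() is typically consumed during concurrent marking. This is hypothetical driver code, not part of this file; "worklist" and the helper name stand in for the real mark task queue machinery:

    // Illustrative marking-loop fragment (names are assumptions).
    template <typename WorkList>
    void try_mark_and_enqueue(ShenandoahHeap* heap, WorkList* worklist, oop obj) {
      if (heap->mark_next(obj)) {
        // This thread won the race and now owns scanning obj's fields.
        worklist->push(obj);
      }
      // Otherwise: obj was already marked, is implicitly live (allocated
      // after the next-mark-start), or a competing thread marked it first.
    }
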
  73 
  74 inline bool ShenandoahHeap::is_marked_next(oop obj) const {
  75   HeapWord* addr = (HeapWord*) obj;
  76   return allocated_after_next_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
  77 }
  78 
  79 inline bool ShenandoahHeap::is_marked_complete(oop obj) const {
  80   HeapWord* addr = (HeapWord*) obj;
  81   return allocated_after_complete_mark_start(addr) || _complete_mark_bit_map->isMarked(addr);
  82 }
  83 
  84 inline bool ShenandoahHeap::has_forwarded_objects() const {
  85   return _gc_state.is_set(HAS_FORWARDED);
  86 }
  87 
  88 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  89   uintptr_t region_start = ((uintptr_t) addr);
  90   uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  91   assert(index < num_regions(), err_msg("Region index is in bounds: " PTR_FORMAT, p2i(addr)));
  92   return index;
  93 }
  94 
  95 inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  96   size_t index = heap_region_index_containing(addr);
  97   ShenandoahHeapRegion* const result = get_region(index);
  98   assert(addr >= result->bottom() && addr < result->end(), err_msg("Heap region contains the address: " PTR_FORMAT, p2i(addr)));
  99   return result;
 100 }
 101 
 102 template <class T>
 103 inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
 104   if (in_collection_set(obj)) {
 105     shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_concgc());
 106     obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 107     oopDesc::encode_store_heap_oop(p, obj);
 108   }
 109 #ifdef ASSERT
 110   else {
 111     shenandoah_assert_not_forwarded(p, obj);
 112   }
 113 #endif
 114   return obj;
 115 }
 116 
 117 template <class T>
 118 inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
 119   T o = oopDesc::load_heap_oop(p);
 120   if (! oopDesc::is_null(o)) {
 121     oop obj = oopDesc::decode_heap_oop_not_null(o);
 122     return maybe_update_with_forwarded_not_null(p, obj);
 123   } else {
 124     return NULL;
 125   }
 126 }
 127 
 128 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
 129   return (oop) Atomic::cmpxchg_ptr(n, addr, c);
 130 }
 131 
 132 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
 133   narrowOop cmp = oopDesc::encode_heap_oop(c);
 134   narrowOop val = oopDesc::encode_heap_oop(n);
 135   return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
 136 }
 137 
 138 template <class T>
 139 inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
 140   shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress());
 141   shenandoah_assert_correct(p, heap_oop);
 142 
 143   if (in_collection_set(heap_oop)) {
 144     oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
 145 
 146     shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress());
 147     shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_concgc());
 148 
 149     log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT,
 150                           p2i(p), p2i(heap_oop), p2i(forwarded_oop));
 151 
  152     // If this fails, another thread wrote to p before us; that write will be
  153     // logged in SATB, and the reference will be updated later.
 154     oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
 155 
 156     if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
 157       return forwarded_oop;
 158     } else {
  159       // Note: we used to assert the following here. This doesn't work because
  160       // sometimes, during marking/updating-refs, a Java thread can beat us with
  161       // an arraycopy: it first copies the array, which potentially contains
  162       // from-space refs, and only afterwards updates all from-space refs to
  163       // to-space refs, which leaves a short window where the new array elements
  164       // can be from-space.
 164       // assert(oopDesc::is_null(result) ||
 165       //        oopDesc::unsafe_equals(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
 166       //       "expect not forwarded");
 167       return NULL;
 168     }
 169   } else {
 170     shenandoah_assert_not_forwarded(p, heap_oop);
 171     return heap_oop;
 172   }
 173 }
 174 
 175 inline bool ShenandoahHeap::cancelled_concgc() const {
 176   return _cancelled_concgc.is_set();
 177 }
 178 
 179 inline bool ShenandoahHeap::try_cancel_concgc() {
 180   return _cancelled_concgc.try_set();
 181 }
 182 
 183 inline void ShenandoahHeap::clear_cancelled_concgc() {
 184   _cancelled_concgc.unset();
 185   _oom_evac_handler.clear();
 186 }
 187 
 188 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 189   if (UseTLAB) {

 190     if (!thread->gclab().is_initialized()) {
 191       assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 192              err_msg("Performance: thread should have GCLAB: %s", thread->name()));
  193       // No GCLABs in this thread, fall back to shared allocation
 194       return NULL;
 195     }
 196     HeapWord* obj = thread->gclab().allocate(size);
 197     if (obj != NULL) {
 198       return obj;
 199     }
 200     // Otherwise...
 201     return allocate_from_gclab_slow(thread, size);
 202   } else {
 203     return NULL;
 204   }
 205 }
 206 
 207 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread, bool& evacuated) {
 208   evacuated = false;
 209 
 210   if (Thread::current()->is_oom_during_evac()) {
  211     // This thread went through the OOM-during-evac protocol, so it is safe to
  212     // return the forward pointer. It must not attempt any further evacuations.
 213     return ShenandoahBarrierSet::resolve_forwarded(p);
 214   }
 215 


 216   size_t size_no_fwdptr = (size_t) p->size();
 217   size_t size_with_fwdptr = size_no_fwdptr + BrooksPointer::word_size();
 218 
 219   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
 220 
 221   bool alloc_from_gclab = true;
 222   HeapWord* filler;
 223 #ifdef ASSERT
 224 
 225   assert(thread->is_evac_allowed(), "must be enclosed in ShenandoahOOMDuringEvacHandler");
 226 

 227   if (ShenandoahOOMDuringEvacALot &&
 228       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
 229         filler = NULL;
 230   } else {
 231 #endif

 232     filler = allocate_from_gclab(thread, size_with_fwdptr);

 233     if (filler == NULL) {
 234       filler = allocate_memory(size_with_fwdptr, _alloc_shared_gc);

 235       alloc_from_gclab = false;
 236     }
 237 #ifdef ASSERT
 238   }
 239 #endif
 240 
 241   if (filler == NULL) {
 242     control_thread()->handle_alloc_failure_evac(size_with_fwdptr);
 243 
 244     _oom_evac_handler.handle_out_of_memory_during_evacuation();
 245 
 246     return ShenandoahBarrierSet::resolve_forwarded(p);
 247   }
 248 
 249   // Copy the object and initialize its forwarding ptr:
 250   HeapWord* copy = filler + BrooksPointer::word_size();
 251   oop copy_val = oop(copy);
 252 
 253   Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
 254   BrooksPointer::initialize(oop(copy));
 255 
 256   log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT,
 257                                     p2i(p), p2i(copy));
 258 
 259   // Try to install the new forwarding pointer.
 260   oop result = BrooksPointer::try_update_forwardee(p, copy_val);
 261 
 262   if (oopDesc::unsafe_equals(result, p)) {
 263     // Successfully evacuated. Our copy is now the public one!
 264     evacuated = true;
 265 
 266     log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " succeeded",
 267                                       p2i(p), p2i(copy));
 268 
 269 #ifdef ASSERT
 270     assert(copy_val->is_oop(), "expect oop");
 271     assert(p->klass() == copy_val->klass(), err_msg("Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT,
 272                                                p2i(p), p2i(copy)));
 273 #endif
 274     return copy_val;
 275   }  else {
 276     // Failed to evacuate. We need to deal with the object that is left behind. Since this
 277     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
 278     // But if it happens to contain references to evacuated regions, those references would
 279     // not get updated for this stale copy during this cycle, and we will crash while scanning
 280     // it the next cycle.
 281     //
 282     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
 283     // object will overwrite this stale copy, or the filler object on LAB retirement will
 284     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
 285     // have to explicitly overwrite the copy with the filler object. With that overwrite,
 286     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
 287     if (alloc_from_gclab) {
 288       thread->gclab().rollback(size_with_fwdptr);
 289     } else {
 290       fill_with_object(copy, size_no_fwdptr);
 291     }
 292     log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " failed, use other: " PTR_FORMAT,
 293                                       p2i(p), p2i(copy), p2i(result));
 294     return result;
 295   }
 296 }
 297 
 298 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 299   return ! is_marked_next(oop(entry));
 300 }
 301 
 302 bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
 303   assert(collection_set() != NULL, "Sanity");
 304   return collection_set()->is_in(region_index);
 305 }
 306 
 307 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
 308   return region_in_collection_set(r->region_number());
 309 }
 310 
 311 template <class T>
 312 inline bool ShenandoahHeap::in_collection_set(T p) const {
 313   HeapWord* obj = (HeapWord*) p;
 314   assert(collection_set() != NULL, "Sanity");
 315   assert(is_in(obj), "should be in heap");
 316 
 317   return collection_set()->is_in(obj);
 318 }
 319 


 336 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
 337   return _gc_state.is_set(mask);
 338 }
 339 
 340 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
 341   return _degenerated_gc_in_progress.is_set();
 342 }
 343 
 344 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
 345   return _full_gc_in_progress.is_set();
 346 }
 347 
 348 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
 349   return _full_gc_move_in_progress.is_set();
 350 }
 351 
 352 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
 353   return _gc_state.is_set(UPDATEREFS);
 354 }
 355 
 356 inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
 357   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
 358   HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
 359   bool alloc_after_mark_start = addr >= top_at_mark_start;
 360   return alloc_after_mark_start;
 361 }
 362 
 363 inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
 364   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
 365   HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
 366   bool alloc_after_mark_start = addr >= top_at_mark_start;
 367   return alloc_after_mark_start;
 368 }
 369 
 370 template<class T>
 371 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
 372   marked_object_iterate(region, cl, region->top());
 373 }
 374 
 375 template<class T>
 376 inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
 377   marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit());
 378 }
 379 
 380 template<class T>
 381 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
 382   assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
 383 
 384   MarkBitMap* mark_bit_map = _complete_mark_bit_map;
 385   HeapWord* tams = complete_top_at_mark_start(region->bottom());

 386 
 387   size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
 388   size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
 389   HeapWord* start = region->bottom() + BrooksPointer::word_size();
 390   HeapWord* end = MIN2(tams + BrooksPointer::word_size(), region->end());
 391 
 392   // Step 1. Scan below the TAMS based on bitmap data.
 393   HeapWord* limit_bitmap = MIN2(limit, tams);
 394 
 395   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
 396   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
 397   HeapWord* cb = mark_bit_map->getNextMarkedWordAddress(start, end);
 398 
 399   intx dist = ShenandoahMarkScanPrefetch;
 400   if (dist > 0) {
 401     // Batched scan that prefetches the oop data, anticipating the access to
  402     // either header, oop field, or forwarding pointer. Note that we cannot
  403     // touch anything in the oop while it is still being prefetched, to give
  404     // the prefetch enough time to work. This is why we try to scan the bitmap
  405     // linearly, disregarding the object size. However, since we know the forwarding pointer


 446   }
 447 
 448   // Step 2. Accurate size-based traversal, happens past the TAMS.
 449   // This restarts the scan at TAMS, which makes sure we traverse all objects,
 450   // regardless of what happened at Step 1.
 451   HeapWord* cs = tams + BrooksPointer::word_size();
 452   while (cs < limit) {
 453     assert (cs > tams,  err_msg("only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams)));
 454     assert (cs < limit, err_msg("only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit)));
 455     oop obj = oop(cs);
 456     int size = obj->size();
 457     do_object_marked_complete(cl, obj);
 458     cs += size + skip_objsize_delta;
 459   }
 460 }
 461 
 462 template<class T>
 463 inline void ShenandoahHeap::do_object_marked_complete(T* cl, oop obj) {
 464   assert(!oopDesc::is_null(obj), "sanity");
 465   assert(obj->is_oop(), "sanity");
 466   assert(is_marked_complete(obj), "object expected to be marked");
 467   cl->do_object(obj);
 468 }
 469 
 470 template <class T>
 471 class ShenandoahObjectToOopClosure : public ObjectClosure {
 472   T* _cl;
 473 public:
 474   ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
 475 
 476   void do_object(oop obj) {
 477     obj->oop_iterate(_cl);
 478   }
 479 };
 480 
 481 template <class T>
 482 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
 483   T* _cl;
 484   MemRegion _bounds;
 485 public:
 486   ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :




  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "gc_implementation/shared/markBitMap.inline.hpp"
  28 #include "memory/threadLocalAllocBuffer.inline.hpp"
  29 #include "gc_implementation/shenandoah/brooksPointer.inline.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahAsserts.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp"
  34 #include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  37 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
  38 #include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp"
  39 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/prefetch.hpp"
  43 #include "runtime/prefetch.inline.hpp"
  44 #include "utilities/copy.hpp"
  45 #include "utilities/globalDefinitions.hpp"
  46 
  47 template <class T>
  48 void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
  49   T o = oopDesc::load_heap_oop(p);
  50   if (! oopDesc::is_null(o)) {
  51     oop obj = oopDesc::decode_heap_oop_not_null(o);
  52     _heap->update_with_forwarded_not_null(p, obj);
  53   }
  54 }
  55 
  56 void ShenandoahUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
  57 void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
  58 
  59 inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
  60   size_t new_index = Atomic::add((size_t) 1, &_index);
  61   // get_region() provides the bounds-check and returns NULL on OOB.
  62   return _heap->get_region(new_index - 1);
  63 }
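
As a usage sketch: each call to next() atomically claims one region, so parallel workers can drain a shared iterator without extra synchronization. The loop below is illustrative only (process_region() is a stand-in, not part of this file):

    void drain_regions(ShenandoahRegionIterator& iter) {
      for (ShenandoahHeapRegion* r = iter.next(); r != NULL; r = iter.next()) {
        process_region(r); // each region is claimed by exactly one worker
      }
    }
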
  64 
  65 inline bool ShenandoahHeap::has_forwarded_objects() const {
  66   return _gc_state.is_set(HAS_FORWARDED);
  67 }
  68 
  69 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  70   uintptr_t region_start = ((uintptr_t) addr);
  71   uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
  72   assert(index < num_regions(), err_msg("Region index is in bounds: " PTR_FORMAT, p2i(addr)));
  73   return index;
  74 }
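
A worked example of the shift arithmetic, with illustrative numbers only (assuming a region size of 1 MB, i.e. region_size_bytes_shift() == 20):

    //   addr - base() = 0x00502000  (5 MB + 8 KB into the heap)
    //   index = 0x00502000 >> 20 = 5, i.e. the address lies in region 5;
    //   the assert then checks 5 < num_regions().
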
  75 
  76 inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
  77   size_t index = heap_region_index_containing(addr);
  78   ShenandoahHeapRegion* const result = get_region(index);
  79   assert(addr >= result->bottom() && addr < result->end(), err_msg("Heap region contains the address: " PTR_FORMAT, p2i(addr)));
  80   return result;
  81 }
  82 
  83 template <class T>
  84 inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) {
  85   if (in_collection_set(obj)) {
  86     shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress());
  87     obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  88     oopDesc::encode_store_heap_oop(p, obj);
  89   }
  90 #ifdef ASSERT
  91   else {
  92     shenandoah_assert_not_forwarded(p, obj);
  93   }
  94 #endif
  95   return obj;
  96 }
  97 
  98 template <class T>
  99 inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
 100   T o = oopDesc::load_heap_oop(p);
 101   if (! oopDesc::is_null(o)) {
 102     oop obj = oopDesc::decode_heap_oop_not_null(o);
 103     return maybe_update_with_forwarded_not_null(p, obj);
 104   } else {
 105     return NULL;
 106   }
 107 }
 108 
 109 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
 110   return (oop) Atomic::cmpxchg_ptr(n, addr, c);
 111 }
 112 
 113 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
 114   narrowOop cmp = oopDesc::encode_heap_oop(c);
 115   narrowOop val = oopDesc::encode_heap_oop(n);
 116   return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
 117 }
 118 
 119 template <class T>
 120 inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) {
 121   shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress());
 122   shenandoah_assert_correct(p, heap_oop);
 123 
 124   if (in_collection_set(heap_oop)) {
 125     oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
 126 
 127     shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
 128     shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());
 129 
  130     // If this fails, another thread wrote to p before us; that write will be
  131     // logged in SATB, and the reference will be updated later.
 132     oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
 133 
 134     if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
 135       return forwarded_oop;
 136     } else {
  137       // Note: we used to assert the following here. This doesn't work because
  138       // sometimes, during marking/updating-refs, a Java thread can beat us with
  139       // an arraycopy: it first copies the array, which potentially contains
  140       // from-space refs, and only afterwards updates all from-space refs to
  141       // to-space refs, which leaves a short window where the new array elements
  142       // can be from-space.
 142       // assert(oopDesc::is_null(result) ||
 143       //        oopDesc::unsafe_equals(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
 144       //       "expect not forwarded");
 145       return NULL;
 146     }
 147   } else {
 148     shenandoah_assert_not_forwarded(p, heap_oop);
 149     return heap_oop;
 150   }
 151 }
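
To make the benign CAS failure concrete, a sketched timeline of the race the comments above describe (threads T1/T2 are illustrative):

    // T1: loads heap_oop (a from-space ref) from p, resolves forwarded_oop.
    // T2: stores a different value into p; that store goes through the
    //     regular barriers and is picked up by SATB/update-refs.
    // T1: CAS fails because *p no longer equals heap_oop; NULL is returned
    //     and T2's value stays in place, to be fixed up later if needed.
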
 152 
 153 inline bool ShenandoahHeap::cancelled_gc() const {
 154   return _cancelled_gc.is_set();
 155 }
 156 
 157 inline bool ShenandoahHeap::try_cancel_gc() {
 158   return _cancelled_gc.try_set();
 159 }
 160 
 161 inline void ShenandoahHeap::clear_cancelled_gc() {
 162   _cancelled_gc.unset();
 163   _oom_evac_handler.clear();
 164 }
 165 
 166 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 167   assert(UseTLAB, "TLABs should be enabled");
 168 
 169   if (!thread->gclab().is_initialized()) {
 170     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 171            err_msg("Performance: thread should have GCLAB: %s", thread->name()));
  172     // No GCLABs in this thread, fall back to shared allocation
 173     return NULL;
 174   }
 175   HeapWord *obj = thread->gclab().allocate(size);
 176   if (obj != NULL) {
 177     return obj;
 178   }
 179   // Otherwise...
 180   return allocate_from_gclab_slow(thread, size);
 181 }
 182 
 183 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread, bool& evacuated) {
 184   evacuated = false;
 185 
 186   if (Thread::current()->is_oom_during_evac()) {
  187     // This thread went through the OOM-during-evac protocol, so it is safe to
  188     // return the forward pointer. It must not attempt any further evacuations.
 189     return ShenandoahBarrierSet::resolve_forwarded(p);
 190   }
 191 
  192   assert(thread->is_evac_allowed(), "must be enclosed in an oom-evac scope");
 193 
 194   size_t size_no_fwdptr = (size_t) p->size();
 195   size_t size_with_fwdptr = size_no_fwdptr + BrooksPointer::word_size();
 196 
 197   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
 198 
 199   bool alloc_from_gclab = true;
 200   HeapWord* filler = NULL;
 201 
 202 #ifdef ASSERT
 203   if (ShenandoahOOMDuringEvacALot &&
 204       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
 205         filler = NULL;
 206   } else {
 207 #endif
 208     if (UseTLAB) {
 209       filler = allocate_from_gclab(thread, size_with_fwdptr);
 210     }
 211     if (filler == NULL) {
 212       ShenandoahAllocationRequest req = ShenandoahAllocationRequest::for_shared_gc(size_with_fwdptr);
 213       filler = allocate_memory(req);
 214       alloc_from_gclab = false;
 215     }
 216 #ifdef ASSERT
 217   }
 218 #endif
 219 
 220   if (filler == NULL) {
 221     control_thread()->handle_alloc_failure_evac(size_with_fwdptr);
 222 
 223     _oom_evac_handler.handle_out_of_memory_during_evacuation();
 224 
 225     return ShenandoahBarrierSet::resolve_forwarded(p);
 226   }
 227 
 228   // Copy the object and initialize its forwarding ptr:
 229   HeapWord* copy = filler + BrooksPointer::word_size();
 230   oop copy_val = oop(copy);
 231 
 232   Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
 233   BrooksPointer::initialize(oop(copy));
 234 
 235   // Try to install the new forwarding pointer.
 236   oop result = BrooksPointer::try_update_forwardee(p, copy_val);
 237 
 238   if (oopDesc::unsafe_equals(result, p)) {
 239     // Successfully evacuated. Our copy is now the public one!
 240     evacuated = true;
 241     shenandoah_assert_correct(NULL, copy_val);
 242     return copy_val;
 243   }  else {
 244     // Failed to evacuate. We need to deal with the object that is left behind. Since this
 245     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
 246     // But if it happens to contain references to evacuated regions, those references would
 247     // not get updated for this stale copy during this cycle, and we will crash while scanning
 248     // it the next cycle.
 249     //
 250     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
 251     // object will overwrite this stale copy, or the filler object on LAB retirement will
 252     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
 253     // have to explicitly overwrite the copy with the filler object. With that overwrite,
 254     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
 255     if (alloc_from_gclab) {
 256       thread->gclab().rollback(size_with_fwdptr);
 257     } else {
 258       fill_with_object(copy, size_no_fwdptr);
 259     }
 260     shenandoah_assert_correct(NULL, copy_val);
 261     shenandoah_assert_correct(NULL, result);
 262     return result;
 263   }
 264 }
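
For context, a minimal caller sketch: a hypothetical helper, under the assumption that the caller has already set up the oom-evac scope that the assert above requires. The name and shape are illustrative, not part of this file:

    oop ensure_to_space_copy(ShenandoahHeap* heap, oop obj, Thread* t) {
      if (heap->in_collection_set(obj)) {
        bool evacuated;
        // Returns our new copy on success, the winner's copy if we lost the
        // race, or the current forwardee if this thread already hit OOM.
        return heap->evacuate_object(obj, t, evacuated);
      }
      return obj;
    }
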
 265 
 266 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 267   return ! _next_marking_context->is_marked(oop(entry));
 268 }
 269 
 270 bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
 271   assert(collection_set() != NULL, "Sanity");
 272   return collection_set()->is_in(region_index);
 273 }
 274 
 275 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
 276   return region_in_collection_set(r->region_number());
 277 }
 278 
 279 template <class T>
 280 inline bool ShenandoahHeap::in_collection_set(T p) const {
 281   HeapWord* obj = (HeapWord*) p;
 282   assert(collection_set() != NULL, "Sanity");
 283   assert(is_in(obj), "should be in heap");
 284 
 285   return collection_set()->is_in(obj);
 286 }
 287 


 304 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
 305   return _gc_state.is_set(mask);
 306 }
 307 
 308 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
 309   return _degenerated_gc_in_progress.is_set();
 310 }
 311 
 312 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
 313   return _full_gc_in_progress.is_set();
 314 }
 315 
 316 inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
 317   return _full_gc_move_in_progress.is_set();
 318 }
 319 
 320 inline bool ShenandoahHeap::is_update_refs_in_progress() const {
 321   return _gc_state.is_set(UPDATEREFS);
 322 }
 323 
 324 template<class T>
 325 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
 326   marked_object_iterate(region, cl, region->top());
 327 }
 328 
 329 template<class T>
 330 inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
 331   marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit());
 332 }
 333 
 334 template<class T>
 335 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
 336   assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
 337 
 338   ShenandoahMarkingContext* const ctx = complete_marking_context();
 339   MarkBitMap* mark_bit_map = ctx->mark_bit_map();
 340   HeapWord* tams = ctx->top_at_mark_start(region->region_number());
 341 
 342   size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
 343   size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
 344   HeapWord* start = region->bottom() + BrooksPointer::word_size();
 345   HeapWord* end = MIN2(tams + BrooksPointer::word_size(), region->end());
 346 
 347   // Step 1. Scan below the TAMS based on bitmap data.
 348   HeapWord* limit_bitmap = MIN2(limit, tams);
 349 
 350   // Try to scan the initial candidate. If the candidate is above the TAMS, it would
 351   // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2.
 352   HeapWord* cb = mark_bit_map->getNextMarkedWordAddress(start, end);
 353 
 354   intx dist = ShenandoahMarkScanPrefetch;
 355   if (dist > 0) {
 356     // Batched scan that prefetches the oop data, anticipating the access to
  357     // either header, oop field, or forwarding pointer. Note that we cannot
  358     // touch anything in the oop while it is still being prefetched, to give
  359     // the prefetch enough time to work. This is why we try to scan the bitmap
  360     // linearly, disregarding the object size. However, since we know the forwarding pointer


 401   }
 402 
 403   // Step 2. Accurate size-based traversal, happens past the TAMS.
 404   // This restarts the scan at TAMS, which makes sure we traverse all objects,
 405   // regardless of what happened at Step 1.
 406   HeapWord* cs = tams + BrooksPointer::word_size();
 407   while (cs < limit) {
 408     assert (cs > tams,  err_msg("only objects past TAMS here: "   PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams)));
 409     assert (cs < limit, err_msg("only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit)));
 410     oop obj = oop(cs);
 411     int size = obj->size();
 412     do_object_marked_complete(cl, obj);
 413     cs += size + skip_objsize_delta;
 414   }
 415 }
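
The skip deltas follow from the Brooks pointer layout, with one fwdptr word immediately preceding each object; a sketch of the assumed layout:

    //  ... | fwdptr | object A ........ | fwdptr | object B ...
    //               ^ mark bits are set on object starts, so:
    //  skip_bitmap_delta  = BrooksPointer::word_size() + 1
    //      the next mark can be no closer than one word past the current
    //      mark plus the next object's fwdptr
    //  skip_objsize_delta = BrooksPointer::word_size()
    //      Step 2 advances cs by obj->size() plus the next object's fwdptr
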
 416 
 417 template<class T>
 418 inline void ShenandoahHeap::do_object_marked_complete(T* cl, oop obj) {
 419   assert(!oopDesc::is_null(obj), "sanity");
 420   assert(obj->is_oop(), "sanity");
 421   assert(_complete_marking_context->is_marked(obj), "object expected to be marked");
 422   cl->do_object(obj);
 423 }
 424 
 425 template <class T>
 426 class ShenandoahObjectToOopClosure : public ObjectClosure {
 427   T* _cl;
 428 public:
 429   ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
 430 
 431   void do_object(oop obj) {
 432     obj->oop_iterate(_cl);
 433   }
 434 };
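
A hedged usage sketch for this adapter: it drives an oop closure from the object-based iteration above. ExampleOopClosure is illustrative, standing in for any closure type accepted by oop_iterate():

    ExampleOopClosure oop_cl;
    ShenandoahObjectToOopClosure<ExampleOopClosure> obj_cl(&oop_cl);
    heap->marked_object_iterate(region, &obj_cl); // visits fields of each marked object
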
 435 
 436 template <class T>
 437 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
 438   T* _cl;
 439   MemRegion _bounds;
 440 public:
 441   ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :

