
src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

rev 50076 : Fold Partial GC into Traversal GC


  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "gc/shared/markBitMap.inline.hpp"
  29 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  30 #include "gc/shared/suspendibleThreadSet.hpp"
  31 #include "gc/shenandoah/brooksPointer.inline.hpp"
  32 #include "gc/shenandoah/shenandoahAsserts.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
  37 #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  41 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  42 #include "gc/shenandoah/shenandoahUtils.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/atomic.hpp"
  45 #include "runtime/interfaceSupport.inline.hpp"
  46 #include "runtime/prefetch.hpp"
  47 #include "runtime/prefetch.inline.hpp"
  48 #include "runtime/thread.hpp"
  49 #include "utilities/copy.hpp"
  50 
  51 template <class T>
  52 void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
  53   T o = RawAccess<>::oop_load(p);
  54   if (!CompressedOops::is_null(o)) {
  55     oop obj = CompressedOops::decode_not_null(o);
  56     _heap->update_with_forwarded_not_null(p, obj);
  57   }
  58 }
  59 
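The closure above is the core of the update-refs pass: it loads the reference stored in slot p and, if non-null, rewrites the slot to point at the forwarded copy of the object. A minimal, self-contained sketch of that "load slot, chase forwarding pointer, write back" idea, using invented names (Obj, update_slot) rather than HotSpot code:

struct Obj {
  Obj* forwardee;   // points to the evacuated copy, or to itself when not forwarded
};

static void update_slot(Obj** slot) {
  Obj* o = *slot;
  if (o != nullptr && o->forwardee != o) {
    *slot = o->forwardee;   // rewrite the slot to the object's new location
  }
}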


 115 #ifdef ASSERT
 116   else {
 117     shenandoah_assert_not_forwarded(p, obj);
 118   }
 119 #endif
 120   return obj;
 121 }
 122 
 123 template <class T>
 124 inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
 125   T o = RawAccess<>::oop_load(p);
 126   if (!CompressedOops::is_null(o)) {
 127     oop obj = CompressedOops::decode_not_null(o);
 128     return maybe_update_with_forwarded_not_null(p, obj);
 129   } else {
 130     return NULL;
 131   }
 132 }
 133 
 134 template <class T>
 135 inline oop ShenandoahHeap::evac_update_with_forwarded(T* p, bool &evac) {
 136   evac = false;
 137   T o = RawAccess<>::oop_load(p);
 138   if (!CompressedOops::is_null(o)) {
 139     oop heap_oop = CompressedOops::decode_not_null(o);
 140     if (in_collection_set(heap_oop)) {
 141       oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
 142       if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) {
 143         forwarded_oop = evacuate_object(heap_oop, Thread::current(), evac);
 144       }
 145       oop prev = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
 146       if (oopDesc::unsafe_equals(prev, heap_oop)) {
 147         return forwarded_oop;
 148       } else {
 149         return NULL;
 150       }
 151     }
 152     return heap_oop;
 153   } else {
 154     return NULL;
 155   }
 156 }
 157 
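evac_update_with_forwarded combines three steps: resolve the forwardee of an object in the collection set, evacuate it if it has not been copied yet, and then compare-and-swap the new reference into the slot, returning NULL when another thread updated the slot first. A simplified analogue of that final CAS step, using only standard C++ (cas_update_slot is an invented helper, not part of the Shenandoah API):

#include <atomic>

template <typename T>
static T* cas_update_slot(std::atomic<T*>* slot, T* expected, T* forwarded) {
  T* prev = expected;
  if (slot->compare_exchange_strong(prev, forwarded)) {
    return forwarded;   // we installed the forwarded reference into the slot
  }
  return nullptr;       // lost the race: another thread already updated the slot
}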
 158 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
 159   return (oop) Atomic::cmpxchg(n, addr, c);
 160 }
 161 
 162 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
 163   narrowOop cmp = CompressedOops::encode(c);


 256 
 257 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 258   if (UseTLAB) {
 259     if (!thread->gclab().is_initialized()) {
 260       assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 261              "Performance: thread should have GCLAB: %s", thread->name());
 262       // No GCLABs in this thread, fall back to shared allocation
 263       return NULL;
 264     }
 265     HeapWord* obj = thread->gclab().allocate(size);
 266     if (obj != NULL) {
 267       return obj;
 268     }
 269     // Otherwise...
 270     return allocate_from_gclab_slow(thread, size);
 271   } else {
 272     return NULL;
 273   }
 274 }
 275 
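allocate_from_gclab shows the usual fast-path/slow-path split for thread-local GC allocation buffers: try a bump-pointer allocation in the current GCLAB, go through the slow path when it fails, and return NULL when the thread has no GCLAB at all so the caller falls back to shared allocation. A minimal bump-pointer buffer sketch, with invented names and no connection to the real GCLAB implementation:

#include <cstddef>

struct BumpBuffer {
  char* top;   // next free byte
  char* end;   // first byte past the buffer

  void* allocate(size_t bytes) {
    if (top + bytes <= end) {
      void* result = top;
      top += bytes;        // fast path: just bump the pointer
      return result;
    }
    return nullptr;        // exhausted: caller must take the slow/shared path
  }
};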
 276 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread, bool& evacuated) {
 277   evacuated = false;
 278 
 279   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
 280     // This thread went through the OOM during evac protocol and it is safe to return
 281     // the forward pointer. It must not attempt to evacuate any more.
 282     return ShenandoahBarrierSet::resolve_forwarded(p);
 283   }
 284 
 285   size_t size_no_fwdptr = (size_t) p->size();
 286   size_t size_with_fwdptr = size_no_fwdptr + BrooksPointer::word_size();
 287 
 288   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
 289 
 290   bool alloc_from_gclab = true;
 291   HeapWord* filler;
 292 #ifdef ASSERT
 293 
 294   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in ShenandoahOOMDuringEvacHandler");
 295 
 296   if (ShenandoahOOMDuringEvacALot &&
 297       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
 298         filler = NULL;


 313     _oom_evac_handler.handle_out_of_memory_during_evacuation();
 314 
 315     return ShenandoahBarrierSet::resolve_forwarded(p);
 316   }
 317 
 318   // Copy the object and initialize its forwarding ptr:
 319   HeapWord* copy = filler + BrooksPointer::word_size();
 320   oop copy_val = oop(copy);
 321 
 322   Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
 323   BrooksPointer::initialize(oop(copy));
 324 
 325   log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT,
 326                                     p2i(p), p2i(copy));
 327 
 328   // Try to install the new forwarding pointer.
 329   oop result = BrooksPointer::try_update_forwardee(p, copy_val);
 330 
 331   if (oopDesc::unsafe_equals(result, p)) {
 332     // Successfully evacuated. Our copy is now the public one!
 333     evacuated = true;
 334     log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " succeeded",
 335                                       p2i(p), p2i(copy));
 336 
 337 
 338 #ifdef ASSERT
 339     assert(oopDesc::is_oop(copy_val), "expect oop");
 340     assert(p->klass() == copy_val->klass(), "Should have the same class p: " PTR_FORMAT ", copy: " PTR_FORMAT,
 341                                               p2i(p), p2i(copy));
 342 #endif
 343     return copy_val;
 344   }  else {
 345     // Failed to evacuate. We need to deal with the object that is left behind. Since this
 346     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
 347     // But if it happens to contain references to evacuated regions, those references would
 348     // not get updated for this stale copy during this cycle, and we will crash while scanning
 349     // it the next cycle.
 350     //
 351     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
 352     // object will overwrite this stale copy, or the filler object on LAB retirement will
 353     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and


 374 }
 375 
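evacuate_object follows a copy-then-publish protocol: make a private copy of the object (with room for the Brooks pointer), then race to install the copy as the object's forwardee with a compare-and-swap; the winner's copy becomes the public version, and losers must use the winner's copy and deal with their now-stale allocation as described in the comment above. A self-contained sketch of that protocol over an invented Node type (not the real oop/BrooksPointer machinery):

#include <atomic>

struct Node {
  std::atomic<Node*> forwardee;   // initialized to point at the object itself
  int payload;
};

static Node* evacuate(Node* obj, Node* copy_storage) {
  // Make a private copy first; no other thread can see it yet.
  copy_storage->payload = obj->payload;
  copy_storage->forwardee.store(copy_storage, std::memory_order_relaxed);

  Node* expected = obj;
  if (obj->forwardee.compare_exchange_strong(expected, copy_storage)) {
    return copy_storage;   // we won: our copy is now the public version
  }
  return expected;         // we lost: use the copy installed by the winning thread
}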
 376 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
 377   return region_in_collection_set(r->region_number());
 378 }
 379 
 380 template <class T>
 381 inline bool ShenandoahHeap::in_collection_set(T p) const {
 382   HeapWord* obj = (HeapWord*) p;
 383   assert(collection_set() != NULL, "Sanity");
 384   assert(is_in(obj), "should be in heap");
 385 
 386   return collection_set()->is_in(obj);
 387 }
 388 
 389 inline bool ShenandoahHeap::is_stable() const {
 390   return _gc_state.is_clear();
 391 }
 392 
 393 inline bool ShenandoahHeap::is_idle() const {
 394   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | PARTIAL | TRAVERSAL);
 395 }
 396 
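The GC-state predicates here all consult a single _gc_state flag mask: each concurrent phase owns one bit, is_stable() means no bits at all are set, and is_idle() means none of the listed phase bits are set. A simplified model with invented constants (the actual ShenandoahHeap representation may differ):

#include <cstdint>

enum PhaseBits : uint8_t {
  MARKING    = 1 << 0,
  EVACUATION = 1 << 1,
  UPDATEREFS = 1 << 2,
  PARTIAL    = 1 << 3,
  TRAVERSAL  = 1 << 4
};

struct GCState {
  uint8_t bits = 0;

  bool is_set(uint8_t mask)   const { return (bits & mask) != 0; }
  bool is_unset(uint8_t mask) const { return (bits & mask) == 0; }
  bool is_clear()             const { return bits == 0; }
};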
 397 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
 398   return _gc_state.is_set(MARKING);
 399 }
 400 
 401 inline bool ShenandoahHeap::is_concurrent_partial_in_progress() const {
 402   return _gc_state.is_set(PARTIAL);
 403 }
 404 
 405 inline bool ShenandoahHeap::is_concurrent_traversal_in_progress() const {
 406   return _gc_state.is_set(TRAVERSAL);
 407 }
 408 
 409 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
 410   return _gc_state.is_set(EVACUATION);
 411 }
 412 
 413 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
 414   return _gc_state.is_set(mask);
 415 }
 416 
 417 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
 418   return _degenerated_gc_in_progress.is_set();
 419 }
 420 
 421 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
 422   return _full_gc_in_progress.is_set();




  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "gc/shared/markBitMap.inline.hpp"
  29 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  30 #include "gc/shared/suspendibleThreadSet.hpp"
  31 #include "gc/shenandoah/brooksPointer.inline.hpp"
  32 #include "gc/shenandoah/shenandoahAsserts.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
  37 #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  41 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  42 #include "gc/shenandoah/shenandoahUtils.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/atomic.hpp"
  45 #include "runtime/interfaceSupport.inline.hpp"
  46 #include "runtime/prefetch.hpp"
  47 #include "runtime/prefetch.inline.hpp"
  48 #include "runtime/thread.hpp"
  49 #include "utilities/copy.hpp"
  50 
  51 template <class T>
  52 void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
  53   T o = RawAccess<>::oop_load(p);
  54   if (!CompressedOops::is_null(o)) {
  55     oop obj = CompressedOops::decode_not_null(o);
  56     _heap->update_with_forwarded_not_null(p, obj);
  57   }
  58 }
  59 


 115 #ifdef ASSERT
 116   else {
 117     shenandoah_assert_not_forwarded(p, obj);
 118   }
 119 #endif
 120   return obj;
 121 }
 122 
 123 template <class T>
 124 inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) {
 125   T o = RawAccess<>::oop_load(p);
 126   if (!CompressedOops::is_null(o)) {
 127     oop obj = CompressedOops::decode_not_null(o);
 128     return maybe_update_with_forwarded_not_null(p, obj);
 129   } else {
 130     return NULL;
 131   }
 132 }
 133 
 134 template <class T>
 135 inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) {

 136   T o = RawAccess<>::oop_load(p);
 137   if (!CompressedOops::is_null(o)) {
 138     oop heap_oop = CompressedOops::decode_not_null(o);
 139     if (in_collection_set(heap_oop)) {
 140       oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop);
 141       if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) {
 142         forwarded_oop = evacuate_object(heap_oop, Thread::current());
 143       }
 144       oop prev = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
 145       if (oopDesc::unsafe_equals(prev, heap_oop)) {
 146         return forwarded_oop;
 147       } else {
 148         return NULL;
 149       }
 150     }
 151     return heap_oop;
 152   } else {
 153     return NULL;
 154   }
 155 }
 156 
 157 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
 158   return (oop) Atomic::cmpxchg(n, addr, c);
 159 }
 160 
 161 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
 162   narrowOop cmp = CompressedOops::encode(c);


 255 
 256 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 257   if (UseTLAB) {
 258     if (!thread->gclab().is_initialized()) {
 259       assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
 260              "Performance: thread should have GCLAB: %s", thread->name());
 261       // No GCLABs in this thread, fall back to shared allocation
 262       return NULL;
 263     }
 264     HeapWord* obj = thread->gclab().allocate(size);
 265     if (obj != NULL) {
 266       return obj;
 267     }
 268     // Otherwise...
 269     return allocate_from_gclab_slow(thread, size);
 270   } else {
 271     return NULL;
 272   }
 273 }
 274 
 275 inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {


 276   if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
 277     // This thread went through the OOM during evac protocol and it is safe to return
 278     // the forward pointer. It must not attempt to evacuate any more.
 279     return ShenandoahBarrierSet::resolve_forwarded(p);
 280   }
 281 
 282   size_t size_no_fwdptr = (size_t) p->size();
 283   size_t size_with_fwdptr = size_no_fwdptr + BrooksPointer::word_size();
 284 
 285   assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
 286 
 287   bool alloc_from_gclab = true;
 288   HeapWord* filler;
 289 #ifdef ASSERT
 290 
 291   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in ShenandoahOOMDuringEvacHandler");
 292 
 293   if (ShenandoahOOMDuringEvacALot &&
 294       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
 295         filler = NULL;


 310     _oom_evac_handler.handle_out_of_memory_during_evacuation();
 311 
 312     return ShenandoahBarrierSet::resolve_forwarded(p);
 313   }
 314 
 315   // Copy the object and initialize its forwarding ptr:
 316   HeapWord* copy = filler + BrooksPointer::word_size();
 317   oop copy_val = oop(copy);
 318 
 319   Copy::aligned_disjoint_words((HeapWord*) p, copy, size_no_fwdptr);
 320   BrooksPointer::initialize(oop(copy));
 321 
 322   log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT,
 323                                     p2i(p), p2i(copy));
 324 
 325   // Try to install the new forwarding pointer.
 326   oop result = BrooksPointer::try_update_forwardee(p, copy_val);
 327 
 328   if (oopDesc::unsafe_equals(result, p)) {
 329     // Successfully evacuated. Our copy is now the public one!

 330     log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " succeeded",
 331                                       p2i(p), p2i(copy));
 332 
 333 
 334 #ifdef ASSERT
 335     assert(oopDesc::is_oop(copy_val), "expect oop");
 336     assert(p->klass() == copy_val->klass(), "Should have the same class p: " PTR_FORMAT ", copy: " PTR_FORMAT,
 337                                               p2i(p), p2i(copy));
 338 #endif
 339     return copy_val;
 340   }  else {
 341     // Failed to evacuate. We need to deal with the object that is left behind. Since this
 342     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
 343     // But if it happens to contain references to evacuated regions, those references would
 344     // not get updated for this stale copy during this cycle, and we will crash while scanning
 345     // it the next cycle.
 346     //
 347     // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
 348     // object will overwrite this stale copy, or the filler object on LAB retirement will
 349     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and


 370 }
 371 
 372 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
 373   return region_in_collection_set(r->region_number());
 374 }
 375 
 376 template <class T>
 377 inline bool ShenandoahHeap::in_collection_set(T p) const {
 378   HeapWord* obj = (HeapWord*) p;
 379   assert(collection_set() != NULL, "Sanity");
 380   assert(is_in(obj), "should be in heap");
 381 
 382   return collection_set()->is_in(obj);
 383 }
 384 
 385 inline bool ShenandoahHeap::is_stable() const {
 386   return _gc_state.is_clear();
 387 }
 388 
 389 inline bool ShenandoahHeap::is_idle() const {
 390   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL);
 391 }
 392 
 393 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
 394   return _gc_state.is_set(MARKING);




 395 }
 396 
 397 inline bool ShenandoahHeap::is_concurrent_traversal_in_progress() const {
 398   return _gc_state.is_set(TRAVERSAL);
 399 }
 400 
 401 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
 402   return _gc_state.is_set(EVACUATION);
 403 }
 404 
 405 inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const {
 406   return _gc_state.is_set(mask);
 407 }
 408 
 409 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
 410   return _degenerated_gc_in_progress.is_set();
 411 }
 412 
 413 inline bool ShenandoahHeap::is_full_gc_in_progress() const {
 414   return _full_gc_in_progress.is_set();

