
src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp

rev 57589 : 8237632: Shenandoah fails some vmTestbase_nsk_jvmti tests with "Forwardee must point to a heap address"
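In short, the change guards forwardee resolution behind a collection-set check in two places: the CAS-failure path of the reference-update code and requires_marking(). The sketch below condenses the changed logic; it is not a verbatim excerpt of the hunks that follow, and it assumes the surrounding ShenandoahHeap context (cas_oop(), in_collection_set(), ShenandoahBarrierSet::resolve_forwarded()).

    // Before: the CAS-failure witness was resolved unconditionally.
    oop result = ShenandoahBarrierSet::resolve_forwarded(witness);

    // After: resolve only when the witness is a non-null oop that is still in the
    // collection set; otherwise return the witness as-is.
    oop result = witness;
    if (!CompressedOops::is_null(witness) && in_collection_set(witness)) {
      result = ShenandoahBarrierSet::resolve_forwarded(witness);
    }

    // The same guard appears in requires_marking():
    if (RESOLVE && in_collection_set(obj)) {
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
    }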

Old version:

 159       return forwarded_oop;
 160     }
 161 
 162     shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
 163     shenandoah_assert_not_forwarded(p, forwarded_oop);
 164     shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());
 165 
 166     // If this fails, another thread wrote to p before us; it will be logged in SATB and the
 167     // reference will be updated later.
 168     oop witness = cas_oop(forwarded_oop, p, heap_oop);
 169 
 170     if (witness != heap_oop) {
 171       // CAS failed, someone beat us to it. Normally, we would return the failure witness,
 172       // because that would be the proper write of the to-space object, enforced by strong barriers.
 173       // However, there is a corner case with arraycopy. A Java thread can beat us with an
 174       // arraycopy that first copies the array, which potentially contains from-space refs,
 175       // and only afterwards updates all from-space refs to to-space refs. This leaves a short
 176       // window where the new array elements can still be from-space.
 177       // In this case, we can just resolve the result again. As we resolve, we need to consider
 178       // that the contended write might have been NULL.
 179       oop result = ShenandoahBarrierSet::resolve_forwarded(witness);
 180       shenandoah_assert_not_forwarded_except(p, result, (result == NULL));
 181       shenandoah_assert_not_in_cset_except(p, result, (result == NULL) || cancelled_gc());
 182       return result;
 183     } else {
 184       // Success! We have updated with the known to-space copy. We have already asserted it is sane.
 185       return forwarded_oop;
 186     }
 187   } else {
 188     shenandoah_assert_not_forwarded(p, heap_oop);
 189     return heap_oop;
 190   }
 191 }
 192 
 193 inline bool ShenandoahHeap::cancelled_gc() const {
 194   return _cancelled_gc.get() == CANCELLED;
 195 }
 196 
 197 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
 198   if (! (sts_active && ShenandoahSuspendibleWorkers)) {
 199     return cancelled_gc();


 300     //
 301     // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
 302     // object will overwrite this stale copy, or the filler object on LAB retirement will
 303     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
 304     // have to explicitly overwrite the copy with the filler object. With that overwrite,
 305     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
 306     if (alloc_from_gclab) {
 307       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
 308     } else {
 309       fill_with_object(copy, size);
 310       shenandoah_assert_correct(NULL, copy_val);
 311     }
 312     shenandoah_assert_correct(NULL, result);
 313     return result;
 314   }
 315 }
 316 
 317 template<bool RESOLVE>
 318 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 319   oop obj = oop(entry);
 320   if (RESOLVE) {
 321     obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 322   }
 323   return !_marking_context->is_marked(obj);
 324 }
 325 
 326 template <class T>
 327 inline bool ShenandoahHeap::in_collection_set(T p) const {
 328   HeapWord* obj = (HeapWord*) p;
 329   assert(collection_set() != NULL, "Sanity");
 330   assert(is_in(obj), "should be in heap");
 331 
 332   return collection_set()->is_in(obj);
 333 }
 334 
 335 inline bool ShenandoahHeap::is_stable() const {
 336   return _gc_state.is_clear();
 337 }
 338 
 339 inline bool ShenandoahHeap::is_idle() const {
 340   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL);

New version:

 159       return forwarded_oop;
 160     }
 161 
 162     shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress());
 163     shenandoah_assert_not_forwarded(p, forwarded_oop);
 164     shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc());
 165 
 166     // If this fails, another thread wrote to p before us; it will be logged in SATB and the
 167     // reference will be updated later.
 168     oop witness = cas_oop(forwarded_oop, p, heap_oop);
 169 
 170     if (witness != heap_oop) {
 171       // CAS failed, someone beat us to it. Normally, we would return the failure witness,
 172       // because that would be the proper write of the to-space object, enforced by strong barriers.
 173       // However, there is a corner case with arraycopy. A Java thread can beat us with an
 174       // arraycopy that first copies the array, which potentially contains from-space refs,
 175       // and only afterwards updates all from-space refs to to-space refs. This leaves a short
 176       // window where the new array elements can still be from-space.
 177       // In this case, we can just resolve the result again. As we resolve, we need to consider
 178       // that the contended write might have been NULL.
 179       oop result = witness;
 180       if (!CompressedOops::is_null(witness) && in_collection_set(witness)) {
 181         result = ShenandoahBarrierSet::resolve_forwarded(witness);
 182       }
 183       shenandoah_assert_not_forwarded_except(p, result, (result == NULL));
 184       shenandoah_assert_not_in_cset_except(p, result, (result == NULL) || cancelled_gc());
 185       return result;
 186     } else {
 187       // Success! We have updated with the known to-space copy. We have already asserted it is sane.
 188       return forwarded_oop;
 189     }
 190   } else {
 191     shenandoah_assert_not_forwarded(p, heap_oop);
 192     return heap_oop;
 193   }
 194 }
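As an aside, the shape of this failure-witness handling can be modeled with a small self-contained toy (plain std::atomic and made-up types; a sketch of the pattern, not HotSpot code):

    #include <atomic>

    // Toy stand-ins for oop, the collection set, and forwardee resolution.
    struct ToyObj {
      ToyObj* fwd;       // forwarding pointer; points to self when not forwarded
      bool    in_cset;   // stand-in for ShenandoahHeap::in_collection_set()
    };

    static ToyObj* toy_resolve(ToyObj* o) { return o->fwd; }

    // Mirrors the patched CAS-failure path: on a failed CAS, the witness is
    // resolved only if it is non-null and still sits in the collection set.
    static ToyObj* toy_update_ref(std::atomic<ToyObj*>* slot, ToyObj* expected, ToyObj* forwardee) {
      ToyObj* witness = expected;
      if (slot->compare_exchange_strong(witness, forwardee)) {
        return forwardee;                 // success: slot now holds the to-space copy
      }
      if (witness != nullptr && witness->in_cset) {
        return toy_resolve(witness);      // the contended write may still be a from-space ref
      }
      return witness;                     // NULL or outside the cset: return as-is
    }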
 195 
 196 inline bool ShenandoahHeap::cancelled_gc() const {
 197   return _cancelled_gc.get() == CANCELLED;
 198 }
 199 
 200 inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
 201   if (! (sts_active && ShenandoahSuspendibleWorkers)) {
 202     return cancelled_gc();


 303     //
 304     // For GCLAB allocations, it is enough to roll back the allocation ptr. Either the next
 305     // object will overwrite this stale copy, or the filler object on LAB retirement will
 306     // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
 307     // have to explicitly overwrite the copy with the filler object. With that overwrite,
 308     // we have to keep the fwdptr initialized and pointing to our (stale) copy.
 309     if (alloc_from_gclab) {
 310       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
 311     } else {
 312       fill_with_object(copy, size);
 313       shenandoah_assert_correct(NULL, copy_val);
 314     }
 315     shenandoah_assert_correct(NULL, result);
 316     return result;
 317   }
 318 }
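The rollback comment above distinguishes GCLAB and shared allocations. A toy bump-pointer lab (made up for illustration, not the real PLAB) shows why rolling back the allocation ptr works for the LAB case, while a shared allocation has nothing to retract and must be overwritten with a filler object instead:

    #include <cstddef>

    // Toy bump-pointer lab: illustrates "roll back the allocation ptr".
    struct ToyLab {
      char* top;

      char* allocate(std::size_t bytes) {
        char* obj = top;
        top += bytes;
        return obj;
      }

      void undo_allocation(char* obj, std::size_t bytes) {
        // In this toy, only the most recently allocated block can be retracted;
        // anything else would have to be overwritten with a filler, as in the
        // non-GCLAB path above.
        if (obj + bytes == top) {
          top = obj;
        }
      }
    };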
 319 
 320 template<bool RESOLVE>
 321 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 322   oop obj = oop(entry);
 323   if (RESOLVE && in_collection_set(obj)) {
 324     obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 325   }
 326   return !_marking_context->is_marked(obj);
 327 }
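For context, requires_marking() is templatized on whether the entry may still be a from-space reference; with this change, resolution is additionally skipped for objects outside the collection set. A hypothetical call site (made up for illustration, not part of this patch) might look like:

    // entry already known to be a to-space (or never-forwarded) reference:
    bool keep = heap->requires_marking<false>(entry);

    // entry may be forwarded; resolve it first, but only if it is in the cset:
    bool keep_resolved = heap->requires_marking<true>(entry);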
 328 
 329 template <class T>
 330 inline bool ShenandoahHeap::in_collection_set(T p) const {
 331   HeapWord* obj = (HeapWord*) p;
 332   assert(collection_set() != NULL, "Sanity");
 333   assert(is_in(obj), "should be in heap");
 334 
 335   return collection_set()->is_in(obj);
 336 }
 337 
 338 inline bool ShenandoahHeap::is_stable() const {
 339   return _gc_state.is_clear();
 340 }
 341 
 342 inline bool ShenandoahHeap::is_idle() const {
 343   return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL);

