< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp

Print this page
rev 54386 : 8221766: Load-reference barriers for Shenandoah


 201   if (!ShenandoahCloneBarrier) return;
 202   if (! need_update_refs_barrier()) return;
 203 
 204   // This is called for cloning an object (see jvm.cpp) after the clone
 205   // has been made. We are not interested in any 'previous value' because
 206   // it would be NULL in any case. But we *are* interested in any oop*
 207   // that potentially need to be updated.
 208 
 209   oop obj = oop(mr.start());
 210   shenandoah_assert_correct(NULL, obj);
 211   if (_heap->is_concurrent_traversal_in_progress()) {
 212     ShenandoahEvacOOMScope oom_evac_scope;
 213     ShenandoahUpdateRefsForOopClosure</* wb = */ true> cl;
 214     obj->oop_iterate(&cl);
 215   } else {
 216     ShenandoahUpdateRefsForOopClosure</* wb = */ false> cl;
 217     obj->oop_iterate(&cl);
 218   }
 219 }
 220 
 // Read barrier: resolve a (possibly forwarded) reference on load.
 // Returns the forwardee of src when forwarding may be in effect,
 // otherwise returns src unchanged.
 221 oop ShenandoahBarrierSet::read_barrier(oop src) {
 222   // Check for forwarded objects, because on Full GC path we might deal with
 223   // non-trivial fwdptrs that contain Full GC specific metadata. We could check
 224   // for is_full_gc_in_progress(), but this also covers the case of stable heap,
 225   // which provides a bit of performance improvement.
 226   if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
 227     return ShenandoahBarrierSet::resolve_forwarded(src);
 228   } else {
 229     return src;
 230   }
 231 }
 232 
 // acmp barrier: oop equality that stays correct when one operand may be a
 // forwarded copy of the other. A raw 'equal' result is trusted as-is; only
 // a raw 'not equal' takes the slow path that resolves both operands.
 233 bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
 234   bool eq = oopDesc::equals_raw(obj1, obj2);
 235   if (! eq && ShenandoahAcmpBarrier) {
       // Keep the forwarding-pointer loads below from being reordered before
       // the raw compare above, then retry the compare on the resolved oops.
 236     OrderAccess::loadload();
 237     obj1 = resolve_forwarded(obj1);
 238     obj2 = resolve_forwarded(obj2);
 239     eq = oopDesc::equals_raw(obj1, obj2);
 240   }
 241   return eq;
 242 }
 243 
 // Write barrier slow path taken by mutator threads during evacuation:
 // obj is asserted to be in the collection set; if it has not been copied
 // yet, evacuate it here (and opportunistically evacuate a few adjacent
 // objects), returning the new copy. Otherwise return the existing forwardee.
 244 oop ShenandoahBarrierSet::write_barrier_mutator(oop obj) {
 245   assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");

 246   assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
 247   shenandoah_assert_in_cset(NULL, obj);
 248 
 249   oop fwd = resolve_forwarded_not_null(obj);
       // obj == fwd means the object has no copy yet: this thread evacuates it.
 250   if (oopDesc::equals_raw(obj, fwd)) {
 251     ShenandoahEvacOOMScope oom_evac_scope;
 252 
 253     Thread* thread = Thread::current();
 254     oop res_oop = _heap->evacuate_object(obj, thread);
 255 
 256     // Since we are already here and paid the price of getting through runtime call adapters
 257     // and acquiring oom-scope, it makes sense to try and evacuate more adjacent objects,
 258     // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
 259     // total assist costs, and can introduce a lot of evacuation latency. This is why we
 260     // only scan for _nearest_ N objects, regardless if they are eligible for evac or not.
 261     // The scan itself should also avoid touching the non-marked objects below TAMS, because
 262     // their metadata (notably, klasses) may be incorrect already.
 263 
 264     size_t max = ShenandoahEvacAssist;
 265     if (max > 0) {


       // NOTE(review): webrev lines 266-270 are elided in this listing; 'ctx'
       // used below is declared there — presumably the heap's marking context.
       // Verify against the complete file.
 271       ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
 272       assert(r->is_cset(), "sanity");
 273 
           // Step past obj plus its Brooks forwarding-pointer word(s) to reach
           // the next object in the region.
 274       HeapWord* cur = (HeapWord*)obj + obj->size() + ShenandoahBrooksPointer::word_size();
 275 
 276       size_t count = 0;
           // Walk at most 'max' marked neighbors below region top.
 277       while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
 278         oop cur_oop = oop(cur);
             // Only evacuate neighbors that have no copy yet.
 279         if (oopDesc::equals_raw(cur_oop, resolve_forwarded_not_null(cur_oop))) {
 280           _heap->evacuate_object(cur_oop, thread);
 281         }
 282         cur = cur + cur_oop->size() + ShenandoahBrooksPointer::word_size();
 283       }
 284     }
 285 
 286     return res_oop;
 287   }
 288   return fwd;
 289 }
 290 
 // Write barrier slow path (general form): for a non-NULL obj that sits in
 // the collection set and is not yet copied while evacuation/traversal is in
 // progress, evacuate it and return the copy; otherwise return the resolved
 // forwardee (or obj itself when NULL).
 291 oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
 292   assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
 293   if (!CompressedOops::is_null(obj)) {
 294     bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
 295     oop fwd = resolve_forwarded_not_null(obj);
 296     if (evac_in_progress &&
 297         _heap->in_collection_set(obj) &&
 298         oopDesc::equals_raw(obj, fwd)) {
 299       Thread *t = Thread::current();
           // GC task threads skip entering ShenandoahEvacOOMScope here —
           // presumably they are already covered by one; verify against the
           // OOM-during-evac protocol.
 300       if (t->is_GC_task_thread()) {
 301         return _heap->evacuate_object(obj, t);
 302       } else {
 303         ShenandoahEvacOOMScope oom_evac_scope;
 304         return _heap->evacuate_object(obj, t);
 305       }
 306     } else {
 307       return fwd;
 308     }
 309   } else {
 310     return obj;
 311   }
 312 }
 313 
 // Write barrier fast path: only drop into the slow path when the barrier is
 // enabled and the heap may actually contain forwarded objects.
 314 oop ShenandoahBarrierSet::write_barrier(oop obj) {
 315   if (ShenandoahWriteBarrier && _heap->has_forwarded_objects()) {
 316     return write_barrier_impl(obj);
 317   } else {
 318     return obj;
 319   }
 320 }
 321 
 // Storeval barrier: applied to the value being stored into the heap.
 // During concurrent traversal, write-barrier the value and enqueue it on
 // the SATB queue; under ShenandoahStoreValReadBarrier, resolve the value's
 // forwardee instead. Returns the (possibly updated) value to store.
 322 oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
 323   if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
 324     obj = write_barrier(obj);
 325     enqueue(obj);
 326   }
 327   if (ShenandoahStoreValReadBarrier) {
 328     obj = resolve_forwarded(obj);
 329   }
 330   return obj;
 331 }
 332 
 // Keep-alive barrier: while concurrent marking is in progress, push obj onto
 // the SATB queue so the marker treats it as live.
 333 void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
 334   if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
 335     enqueue(obj);
 336   }
 337 }
 338 
 // Push obj onto the current thread's SATB mark queue, pre-filtering objects
 // that do not require marking. Must only be called while SATB is active.
 339 void ShenandoahBarrierSet::enqueue(oop obj) {
 340   shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
 341   assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");
 342 
 343   // Filter marked objects before hitting the SATB queues. The same predicate would
 344   // be used by SATBMQ::filter to eliminate already marked objects downstream, but
 345   // filtering here helps to avoid wasteful SATB queueing work to begin with.
 346   if (!_heap->requires_marking<false>(obj)) return;
 347 
 348   ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
 349 }
 350 




 201   if (!ShenandoahCloneBarrier) return;
 202   if (! need_update_refs_barrier()) return;
 203 
 204   // This is called for cloning an object (see jvm.cpp) after the clone
 205   // has been made. We are not interested in any 'previous value' because
 206   // it would be NULL in any case. But we *are* interested in any oop*
 207   // that potentially need to be updated.
 208 
 209   oop obj = oop(mr.start());
 210   shenandoah_assert_correct(NULL, obj);
 211   if (_heap->is_concurrent_traversal_in_progress()) {
 212     ShenandoahEvacOOMScope oom_evac_scope;
 213     ShenandoahUpdateRefsForOopClosure</* wb = */ true> cl;
 214     obj->oop_iterate(&cl);
 215   } else {
 216     ShenandoahUpdateRefsForOopClosure</* wb = */ false> cl;
 217     obj->oop_iterate(&cl);
 218   }
 219 }
 220 
 // Load-reference barrier for a known-non-NULL oop: take the slow path only
 // when the barrier is enabled and the heap may contain forwarded objects;
 // otherwise the loaded reference is returned as-is.
 221 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
 222   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
 223     return load_reference_barrier_impl(obj);




 224   } else {
 225     return obj;
 226   }
 227 }
 228 
 // NULL-tolerant load-reference barrier: forwards non-NULL references to the
 // not-null variant, passes NULL through untouched.
 229 oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
 230   if (obj != NULL) {
 231     return load_reference_barrier_not_null(obj);
 232   } else {
 233     return obj;


 234   }

 235 }
 236 
 237 
 // Load-reference barrier slow path taken by mutator threads during
 // evacuation: obj is asserted to be in the collection set; if it has not
 // been copied yet, evacuate it here (and opportunistically evacuate a few
 // adjacent objects), returning the new copy. Otherwise return the forwardee.
 238 oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj) {
 239   assert(ShenandoahLoadRefBarrier, "should be enabled");
 240   assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
 241   shenandoah_assert_in_cset(NULL, obj);
 242 
 243   oop fwd = resolve_forwarded_not_null(obj);
       // obj == fwd means the object has no copy yet: this thread evacuates it.
 244   if (oopDesc::equals_raw(obj, fwd)) {
 245     ShenandoahEvacOOMScope oom_evac_scope;
 246 
 247     Thread* thread = Thread::current();
 248     oop res_oop = _heap->evacuate_object(obj, thread);
 249 
 250     // Since we are already here and paid the price of getting through runtime call adapters
 251     // and acquiring oom-scope, it makes sense to try and evacuate more adjacent objects,
 252     // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
 253     // total assist costs, and can introduce a lot of evacuation latency. This is why we
 254     // only scan for _nearest_ N objects, regardless if they are eligible for evac or not.
 255     // The scan itself should also avoid touching the non-marked objects below TAMS, because
 256     // their metadata (notably, klasses) may be incorrect already.
 257 
 258     size_t max = ShenandoahEvacAssist;
 259     if (max > 0) {


       // NOTE(review): webrev lines 260-264 are elided in this listing; 'ctx'
       // used below is declared there — presumably the heap's marking context.
       // Verify against the complete file.
 265       ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
 266       assert(r->is_cset(), "sanity");
 267 
           // Step past obj plus its Brooks forwarding-pointer word(s) to reach
           // the next object in the region.
 268       HeapWord* cur = (HeapWord*)obj + obj->size() + ShenandoahBrooksPointer::word_size();
 269 
 270       size_t count = 0;
           // Walk at most 'max' marked neighbors below region top.
 271       while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
 272         oop cur_oop = oop(cur);
             // Only evacuate neighbors that have no copy yet.
 273         if (oopDesc::equals_raw(cur_oop, resolve_forwarded_not_null(cur_oop))) {
 274           _heap->evacuate_object(cur_oop, thread);
 275         }
 276         cur = cur + cur_oop->size() + ShenandoahBrooksPointer::word_size();
 277       }
 278     }
 279 
 280     return res_oop;
 281   }
 282   return fwd;
 283 }
 284 
 // Load-reference barrier slow path (general form): for a non-NULL obj that
 // sits in the collection set and is not yet copied while evacuation or
 // traversal is in progress, evacuate it and return the copy; otherwise
 // return the resolved forwardee (or obj itself when NULL).
 285 oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
 286   assert(ShenandoahLoadRefBarrier, "should be enabled");
 287   if (!CompressedOops::is_null(obj)) {
 288     bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
 289     oop fwd = resolve_forwarded_not_null(obj);
 290     if (evac_in_progress &&
 291         _heap->in_collection_set(obj) &&
 292         oopDesc::equals_raw(obj, fwd)) {
 293       Thread *t = Thread::current();
           // GC task threads skip entering ShenandoahEvacOOMScope here —
           // presumably they are already covered by one; verify against the
           // OOM-during-evac protocol.
 294       if (t->is_GC_task_thread()) {
 295         return _heap->evacuate_object(obj, t);
 296       } else {
 297         ShenandoahEvacOOMScope oom_evac_scope;
 298         return _heap->evacuate_object(obj, t);
 299       }
 300     } else {
 301       return fwd;
 302     }
 303   } else {
 304     return obj;
 305   }
 306 }
 307 
 // Storeval barrier (load-reference-barrier scheme): during concurrent
 // traversal, enqueue the stored value on the SATB queue. Unlike the old
 // scheme, the value is no longer write-barriered or resolved here —
 // presumably because load-reference barriers canonicalize references at
 // load time; verify against the LRB design notes.
 308 void ShenandoahBarrierSet::storeval_barrier(oop obj) {








 309   if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {

 310     enqueue(obj);
 311   }




 312 }
 313 
 // Keep-alive barrier: while concurrent marking is in progress, push obj onto
 // the SATB queue so the marker treats it as live.
 314 void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
 315   if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
 316     enqueue(obj);
 317   }
 318 }
 319 
 // Push obj onto the current thread's SATB mark queue, pre-filtering objects
 // that do not require marking. Must only be called while SATB is active.
 320 void ShenandoahBarrierSet::enqueue(oop obj) {
 321   shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
 322   assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");
 323 
 324   // Filter marked objects before hitting the SATB queues. The same predicate would
 325   // be used by SATBMQ::filter to eliminate already marked objects downstream, but
 326   // filtering here helps to avoid wasteful SATB queueing work to begin with.
 327   if (!_heap->requires_marking<false>(obj)) return;
 328 
 329   ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
 330 }
 331 


< prev index next >