src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp

rev 52371 : [mq]: lvb.patch

Old version:

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#ifdef COMPILER1
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;

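// This closure drives both the clone barrier (write_region) and the array-copy
// post-barrier (write_ref_array) below. With STOREVAL_WRITE_BARRIER == true
// (concurrent traversal), each reference is evacuated/updated and the resulting
// oop is enqueued for marking; with false, references to already-forwarded
// objects are merely updated in place. A minimal usage sketch, mirroring the
// non-traversal path of write_region():
//
//   ShenandoahUpdateRefsForOopClosure</* wb = */ false> cl;
//   obj->oop_iterate(&cl);   // fix up any stale references inside obj
//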
template <bool STOREVAL_WRITE_BARRIER>
class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahBarrierSet* _bs;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o;
    if (STOREVAL_WRITE_BARRIER) {
      o = _heap->evac_update_with_forwarded(p);
      if (!CompressedOops::is_null(o)) {
        _bs->enqueue(o);
      }
    } else {
      _heap->maybe_update_with_forwarded(p);
    }
  }
public:
  ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()), _bs(ShenandoahBarrierSet::barrier_set()) {
    assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  }

  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             BarrierSet::FakeRtti(BarrierSet::Shenandoah)),
  _heap(heap),
  _satb_mark_queue_set()
{
}

ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
}

void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}

bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::Shenandoah;
}

bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}

void ShenandoahBarrierSet::resize_covered_region(MemRegion mr) {
  Unimplemented();
}

void ShenandoahBarrierSet::write_ref_array_work(MemRegion r) {
  ShouldNotReachHere();
}

template <class T, bool STOREVAL_WRITE_BARRIER>
void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
  assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  ShenandoahUpdateRefsForOopClosure<STOREVAL_WRITE_BARRIER> cl;
  T* dst = (T*) start;
  for (size_t i = 0; i < count; i++) {
    cl.do_oop(dst++);
  }
}

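// Post-barrier for bulk reference-array copies, invoked after the elements
// have been copied. The loop flavor is chosen once, up front: narrowOop vs.
// oop depending on UseCompressedOops, and storeval write-barrier semantics
// while concurrent traversal runs, which also requires the evac-OOM scope
// since evacuation may be attempted. A hedged call-site sketch (illustration
// only, not actual stub code), after copying `count` elements into `dst`:
//
//   ShenandoahBarrierSet::barrier_set()->write_ref_array((HeapWord*)dst, count);
//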
void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(UseShenandoahGC, "should be enabled");
  if (count == 0) return;
  if (!ShenandoahCloneBarrier) return;

  if (!need_update_refs_barrier()) return;

  if (_heap->is_concurrent_traversal_in_progress()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop, /* wb = */ true>(start, count);
    } else {
      write_ref_array_loop<oop,       /* wb = */ true>(start, count);
    }
  } else {
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop, /* wb = */ false>(start, count);
    } else {
      write_ref_array_loop<oop,       /* wb = */ false>(start, count);
    }
  }
}

template <class T>
void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
  shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
    T* elem_ptr = dst;
    for (size_t i = 0; i < count; i++, elem_ptr++) {
      T heap_oop = RawAccess<>::oop_load(elem_ptr);
      if (!CompressedOops::is_null(heap_oop)) {
        enqueue(CompressedOops::decode_not_null(heap_oop));
      }
    }
  }
}

void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

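// SATB pre-write barrier. Concurrent marking is snapshot-at-the-beginning, so
// the value about to be overwritten must be kept alive for marking. For a
// store "field = new_val" during concurrent mark, the sequence is conceptually
// (hedged sketch; the store itself is issued by the access layer, not here):
//
//   oop pre_val = RawAccess<>::oop_load(field);
//   if (pre_val != NULL) enqueue(pre_val);   // keep the snapshot value alive
//   // ... the actual store of new_val follows ...
//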
 166 template <class T>
 167 inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
 168   shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
 169   if (_heap->is_concurrent_mark_in_progress()) {
 170     T heap_oop = RawAccess<>::oop_load(field);
 171     if (!CompressedOops::is_null(heap_oop)) {
 172       enqueue(CompressedOops::decode(heap_oop));
 173     }
 174   }
 175 }
 176 
 177 // These are the more general virtual versions.
 178 void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
 179   inline_write_ref_field_pre(field, new_val);
 180 }
 181 
 182 void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
 183   inline_write_ref_field_pre(field, new_val);
 184 }
 185 
 186 void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
 187   guarantee(false, "Not needed");
 188 }
 189 
 190 void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
 191   shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
 192   shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
 193   shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
 194 }
 195 
void ShenandoahBarrierSet::write_region(MemRegion mr) {
  assert(UseShenandoahGC, "should be enabled");
  if (!ShenandoahCloneBarrier) return;
  if (!need_update_refs_barrier()) return;

  // This is called for cloning an object (see jvm.cpp) after the clone
  // has been made. We are not interested in any 'previous value' because
  // it would be NULL in any case. But we *are* interested in any oop*
  // that potentially needs to be updated.

  oop obj = oop(mr.start());
  shenandoah_assert_correct(NULL, obj);
  if (_heap->is_concurrent_traversal_in_progress()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahUpdateRefsForOopClosure</* wb = */ true> cl;
    obj->oop_iterate(&cl);
  } else {
    ShenandoahUpdateRefsForOopClosure</* wb = */ false> cl;
    obj->oop_iterate(&cl);
  }
}

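// Read barrier: resolve an object through its Brooks forwarding pointer. In
// this scheme every object carries a forwarding-pointer word just before its
// header (hence the BrooksPointer::word_size() gaps elsewhere in this file);
// for objects that have not moved, it points back at the object itself. A
// hedged sketch of what resolve_forwarded() amounts to:
//
//   src == NULL ? NULL : oop(*((HeapWord**)src - 1))
//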
oop ShenandoahBarrierSet::read_barrier(oop src) {
  // Check for forwarded objects, because on the Full GC path we might deal with
  // non-trivial fwdptrs that contain Full GC specific metadata. We could check
  // for is_full_gc_in_progress() instead, but the check below also covers the
  // stable-heap case, which yields a small performance improvement.
  if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
    return ShenandoahBarrierSet::resolve_forwarded(src);
  } else {
    return src;
  }
}

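// acmp barrier: because a to-space copy may coexist with its from-space
// original, two references that denote the same object can compare unequal
// on the raw pointers. A failed comparison is therefore retried on the
// forwardees before declaring the operands unequal.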
bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
  bool eq = oopDesc::unsafe_equals(obj1, obj2);
  if (!eq && ShenandoahAcmpBarrier) {
    OrderAccess::loadload();
    obj1 = resolve_forwarded(obj1);
    obj2 = resolve_forwarded(obj2);
    eq = oopDesc::unsafe_equals(obj1, obj2);
  }
  return eq;
}

oop ShenandoahBarrierSet::write_barrier_mutator(oop obj) {
  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
  assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
  shenandoah_assert_in_cset(NULL, obj);

  oop fwd = resolve_forwarded_not_null(obj);
  if (oopDesc::unsafe_equals(obj, fwd)) {
    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and paid the price of getting through runtime call adapters
    // and acquiring the oom-scope, it makes sense to try and evacuate more adjacent objects,
    // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
    // total assist costs, and can introduce a lot of evacuation latency. This is why we
    // only scan the _nearest_ N objects, regardless of whether they are eligible for evac.
    // The scan itself should also avoid touching non-marked objects below TAMS, because
    // their metadata (notably, klasses) may be incorrect already.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses the incomplete marking context, because it coalesces evac with mark.
      // Other code uses the complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
                                      _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      HeapWord* cur = (HeapWord*)obj + obj->size() + BrooksPointer::word_size();

      size_t count = 0;
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        if (oopDesc::unsafe_equals(cur_oop, resolve_forwarded_not_null(cur_oop))) {
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size() + BrooksPointer::word_size();
      }
    }

    return res_oop;
  }
  return fwd;
}

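// General form of the write barrier: an object may only be written through
// its to-space copy. The chain below checks: non-null, then evacuation (or
// traversal) in progress, then object in the collection set, then not yet
// forwarded -- only then is the object evacuated; otherwise the already
// resolved forwardee is returned. Unlike write_barrier_mutator() above, this
// can also run on GC task threads, which skip setting up the OOM-evac scope
// here.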
oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        oopDesc::unsafe_equals(obj, fwd)) {
      Thread* t = Thread::current();
      if (t->is_GC_task_thread()) {
        return _heap->evacuate_object(obj, t);
      } else {
        ShenandoahEvacOOMScope oom_evac_scope;
        return _heap->evacuate_object(obj, t);
      }
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}

oop ShenandoahBarrierSet::write_barrier(oop obj) {
  if (ShenandoahWriteBarrier) {
    return write_barrier_impl(obj);
  } else {
    return obj;
  }
}

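// Storeval barrier: applied to the *value* being stored, not to the location.
// In enqueue mode (used by traversal) the value is first write-barriered into
// to-space and then SATB-enqueued so concurrent marking sees it; in
// read-barrier mode it is merely resolved to its forwardee. Either way, the
// value that ends up in the heap never points into from-space.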
oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
  if (ShenandoahStoreValEnqueueBarrier) {
    if (!CompressedOops::is_null(obj)) {
      obj = write_barrier(obj);
      enqueue(obj);
    }
  }
  if (ShenandoahStoreValReadBarrier) {
    obj = resolve_forwarded(obj);
  }
  return obj;
}

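// Keep-alive barrier, used e.g. on the Reference.get() path: a referent handed
// to the application during concurrent marking must be enqueued, otherwise
// SATB marking could miss it and a reachable object could be reclaimed.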
void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
    enqueue(obj);
  }
}

void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
  if (!_satb_mark_queue_set.is_active()) return;

  // Filter marked objects before hitting the SATB queues. The same predicate would
  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
  // filtering here helps to avoid wasteful SATB queueing work to begin with.
  if (!_heap->requires_marking(obj)) return;

  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    ShenandoahThreadLocalData::satb_mark_queue(thr).enqueue(obj);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    _satb_mark_queue_set.shared_satb_queue()->enqueue(obj);
  }
}

void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}

void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}

void ShenandoahBarrierSet::on_thread_attach(JavaThread* thread) {
  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
  assert(!ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "SATB queue should not be active");
  assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_empty(), "SATB queue should be empty");
  if (ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
    ShenandoahThreadLocalData::satb_mark_queue(thread).set_active(true);
  }
  ShenandoahThreadLocalData::set_gc_state(thread, ShenandoahHeap::heap()->gc_state());
  ShenandoahThreadLocalData::initialize_gclab(thread);
}

void ShenandoahBarrierSet::on_thread_detach(JavaThread* thread) {
  ShenandoahThreadLocalData::satb_mark_queue(thread).flush();
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab != NULL) {
    gclab->retire();
  }
}

New version:

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#ifdef COMPILER1
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;

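// The shared barrier logic that used to live in this file moves into a common
// base class; this subclass now only forwards the assembler, the C1/C2 support
// and the heap to the ShenandoahBaseBarrierSet constructor. (The base class is
// defined elsewhere in this patch; inferred from the constructor change below.)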
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  ShenandoahBaseBarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
                           make_barrier_set_c1<ShenandoahBarrierSetC1>(),
                           make_barrier_set_c2<ShenandoahBarrierSetC2>(),
                           BarrierSet::FakeRtti(BarrierSet::Shenandoah),
                           heap) {}

void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}

bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::Shenandoah;
}

template <class T>
inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
  if (_heap->is_concurrent_mark_in_progress()) {
    T heap_oop = RawAccess<>::oop_load(field);
    if (!CompressedOops::is_null(heap_oop)) {
      enqueue(CompressedOops::decode(heap_oop));
    }
  }
}

// These are the more general virtual versions.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}

void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
}