1 /*
   2  * Copyright (c) 2013, 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/g1/g1BarrierSet.hpp"
  26 #include "gc/shenandoah/shenandoahAsserts.hpp"
  27 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  29 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  30 #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "runtime/interfaceSupport.inline.hpp"
  33 
// Process-wide SATB mark queue set; enqueue() and on_thread_attach() below
// route per-thread and shared SATB queues through this single instance.
ShenandoahSATBMarkQueueSet ShenandoahBarrierSet::_satb_mark_queue_set;
  35 
// Closure used by the clone barrier (write_region / write_ref_array) to fix up
// references inside a freshly copied object. Compile-time knobs:
//  - UPDATE_MATRIX:          also record the (location, target) edge in the
//                            connection matrix (matrix-based modes).
//  - STOREVAL_WRITE_BARRIER: evacuate-and-update referents via
//                            evac_update_with_forwarded (used while partial or
//                            traversal GC is in progress); otherwise only
//                            resolve already-forwarded referents.
//  - ALWAYS_ENQUEUE:         SATB-enqueue every non-null referent, not just the
//                            ones this call evacuated (traversal GC mode).
template <bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER, bool ALWAYS_ENQUEUE>
class ShenandoahUpdateRefsForOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  // Process one reference slot; T is oop* or narrowOop*.
  template <class T>
  inline void do_oop_work(T* p) {
    oop o;
    if (STOREVAL_WRITE_BARRIER) {
      bool evac;
      o = _heap->evac_update_with_forwarded(p, evac);
      // Enqueue unconditionally (ALWAYS_ENQUEUE) or only when this call
      // actually evacuated the referent.
      if ((ALWAYS_ENQUEUE || evac) && !CompressedOops::is_null(o)) {
        ShenandoahBarrierSet::enqueue(o);
      }
    } else {
      o = _heap->maybe_update_with_forwarded(p);
    }
    if (UPDATE_MATRIX && !CompressedOops::is_null(o)) {
      _heap->connection_matrix()->set_connected(p, o);
    }
  }
public:
  ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()) {
    assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  }
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
  63 
// Installs the platform-specific barrier assembler and remembers the heap
// so the barrier paths below can query GC state without a lookup.
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             BarrierSet::FakeRtti(BarrierSet::Shenandoah)),
  _heap(heap)
{
}
  70 
// Identify this barrier set in VM/heap printouts.
void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}
  74 
// BarrierSet RTTI: this set answers only to its own name.
bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::Shenandoah;
}
  78 
// Shenandoah has no card-table alignment requirement; every address is fine.
bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}
  82 
// Card-table-style covered regions do not exist in Shenandoah; nobody should
// call this.
void ShenandoahBarrierSet::resize_covered_region(MemRegion mr) {
  Unimplemented();
}
  86 
  87 bool ShenandoahBarrierSet::need_update_refs_barrier() {
  88   if (UseShenandoahMatrix || _heap->is_concurrent_traversal_in_progress()) {
  89     return true;
  90   }
  91   if (_heap->shenandoahPolicy()->update_refs()) {
  92     return _heap->is_update_refs_in_progress();
  93   } else {
  94     return _heap->is_concurrent_mark_in_progress() && _heap->has_forwarded_objects();
  95   }
  96 }
  97 
// Array stores go through write_ref_array()/write_ref_array_pre() below; the
// generic MemRegion entry point must never be reached.
void ShenandoahBarrierSet::write_ref_array_work(MemRegion r) {
  ShouldNotReachHere();
}
 101 
 102 template <class T, bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER, bool ALWAYS_ENQUEUE>
 103 void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
 104   assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
 105   ShenandoahUpdateRefsForOopClosure<UPDATE_MATRIX, STOREVAL_WRITE_BARRIER, ALWAYS_ENQUEUE> cl;
 106   ShenandoahEvacOOMScope oom_evac_scope;
 107   T* dst = (T*) start;
 108   for (size_t i = 0; i < count; i++) {
 109     cl.do_oop(dst++);
 110   }
 111 }
 112 
// Post-barrier for bulk reference-array stores (e.g. arraycopy): dispatch to
// the right write_ref_array_loop instantiation based on compressed-oops mode
// and the GC phase. The three template booleans are matrix / storeval-wb /
// always-enqueue, in that order (see ShenandoahUpdateRefsForOopClosure).
void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(UseShenandoahGC, "should be enabled");
  if (!ShenandoahCloneBarrier) return;
  if (!need_update_refs_barrier()) return;

  if (UseShenandoahMatrix) {
    assert(! _heap->is_concurrent_traversal_in_progress(), "traversal GC should take another branch");
    if (_heap->is_concurrent_partial_in_progress()) {
      // Partial GC with matrix: evacuate referents, record matrix edges.
      if (UseCompressedOops) {
        write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ true,  /* enqueue = */ false>(start, count);
      } else {
        write_ref_array_loop<oop,       /* matrix = */ true, /* wb = */ true,  /* enqueue = */ false>(start, count);
      }
    } else {
      // Matrix maintenance only: resolve forwarded referents, record edges.
      if (UseCompressedOops) {
        write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ false, /* enqueue = */ false>(start, count);
      } else {
        write_ref_array_loop<oop,       /* matrix = */ true, /* wb = */ false, /* enqueue = */ false>(start, count);
      }
    }
  } else if (_heap->is_concurrent_traversal_in_progress()) {
    // Traversal GC: evacuate referents and SATB-enqueue every non-null one.
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop,   /* matrix = */ false, /* wb = */ true, /* enqueue = */ true>(start, count);
    } else {
      write_ref_array_loop<oop,         /* matrix = */ false, /* wb = */ true, /* enqueue = */ true>(start, count);
    }
  } else {
    // Plain update-refs: just fix up already-forwarded referents.
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop,   /* matrix = */ false, /* wb = */ false, /* enqueue = */ false>(start, count);
    } else {
      write_ref_array_loop<oop,         /* matrix = */ false, /* wb = */ false, /* enqueue = */ false>(start, count);
    }
  }
}
 147 
 148 void ShenandoahBarrierSet::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
 149   ShenandoahBarrierSet *bs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
 150   bs->write_ref_array_pre(dst, length, false);
 151 }
 152 
 153 void ShenandoahBarrierSet::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
 154   ShenandoahBarrierSet *bs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
 155   bs->write_ref_array_pre(dst, length, false);
 156 }
 157 
 158 void ShenandoahBarrierSet::write_ref_array_post_entry(HeapWord* dst, size_t length) {
 159   ShenandoahBarrierSet *bs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
 160   bs->ShenandoahBarrierSet::write_ref_array(dst, length);
 161 }
 162 
 163 
 164 template <class T>
 165 void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, int count) {
 166   shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_concgc());
 167   if (ShenandoahSATBBarrier ||
 168       (ShenandoahConditionalSATBBarrier && _heap->is_concurrent_mark_in_progress())) {
 169     T* elem_ptr = dst;
 170     for (int i = 0; i < count; i++, elem_ptr++) {
 171       T heap_oop = RawAccess<>::oop_load(elem_ptr);
 172       if (!CompressedOops::is_null(heap_oop)) {
 173         enqueue(CompressedOops::decode_not_null(heap_oop));
 174       }
 175     }
 176   }
 177 }
 178 
 179 void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
 180   if (! dest_uninitialized) {
 181     write_ref_array_pre_work(dst, count);
 182   }
 183 }
 184 
 185 void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
 186   if (! dest_uninitialized) {
 187     write_ref_array_pre_work(dst, count);
 188   }
 189 }
 190 
 191 template <class T>
 192 inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
 193   shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_concgc());
 194   if (_heap->is_concurrent_mark_in_progress()) {
 195     T heap_oop = RawAccess<>::oop_load(field);
 196     if (!CompressedOops::is_null(heap_oop)) {
 197       enqueue(CompressedOops::decode(heap_oop));
 198     }
 199   }
 200   if (UseShenandoahMatrix && ! CompressedOops::is_null(new_val)) {
 201     ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
 202     matrix->set_connected(field, new_val);
 203   }
 204 }
 205 
// These are the more general virtual versions.
// Virtual dispatch target for uncompressed-oop field pre-barriers.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 210 
// Virtual dispatch target for compressed-oop field pre-barriers.
void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 214 
// Untyped variant is never needed for Shenandoah; fail loudly if reached.
void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}
 218 
// Post-barrier for a single field store. Shenandoah needs no post-store work
// here; in debug builds we only verify the invariants: the store location is
// not in the collection set, and (outside of cancelled GC / active marking)
// the stored value is neither forwarded nor in the collection set.
void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_concgc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_concgc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_concgc() || !_heap->is_concurrent_mark_in_progress());
}
 224 
// Clone barrier: after Object.clone() copied the raw bits, walk the clone's
// reference fields and bring them up to date. Template booleans on the
// closure are matrix / storeval-wb / always-enqueue (see the closure above);
// the dispatch mirrors write_ref_array().
void ShenandoahBarrierSet::write_region(MemRegion mr) {
  assert(UseShenandoahGC, "should be enabled");
  if (!ShenandoahCloneBarrier) return;
  if (! need_update_refs_barrier()) return;

  // This is called for cloning an object (see jvm.cpp) after the clone
  // has been made. We are not interested in any 'previous value' because
  // it would be NULL in any case. But we *are* interested in any oop*
  // that potentially need to be updated.

  ShenandoahEvacOOMScope oom_evac_scope;
  oop obj = oop(mr.start());
  assert(oopDesc::is_oop(obj), "must be an oop");
  if (UseShenandoahMatrix) {
    assert(! _heap->is_concurrent_traversal_in_progress(), "traversal GC should take another branch");
    if (_heap->is_concurrent_partial_in_progress()) {
      // Partial GC with matrix: evacuate referents, record matrix edges.
      ShenandoahUpdateRefsForOopClosure<true, true, false> cl;
      obj->oop_iterate(&cl);
    } else {
      // Matrix maintenance only: resolve forwarded referents, record edges.
      ShenandoahUpdateRefsForOopClosure<true, false, false> cl;
      obj->oop_iterate(&cl);
    }
  } else {
    assert(! _heap->is_concurrent_partial_in_progress(), "partial GC needs matrix");
    if (_heap->is_concurrent_traversal_in_progress()) {
      // Traversal GC: evacuate referents and SATB-enqueue every non-null one.
      ShenandoahUpdateRefsForOopClosure<false, true, true> cl;
      obj->oop_iterate(&cl);
    } else {
      // Plain update-refs: just fix up already-forwarded referents.
      ShenandoahUpdateRefsForOopClosure<false, false, false> cl;
      obj->oop_iterate(&cl);
    }
  }
}
 258 
 259 oop ShenandoahBarrierSet::read_barrier(oop src) {
 260   // Check for forwarded objects, because on Full GC path we might deal with
 261   // non-trivial fwdptrs that contain Full GC specific metadata. We could check
 262   // for is_full_gc_in_progress(), but this also covers the case of stable heap,
 263   // which provides a bit of performance improvement.
 264   if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
 265     return ShenandoahBarrierSet::resolve_forwarded(src);
 266   } else {
 267     return src;
 268   }
 269 }
 270 
 271 bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
 272   bool eq = oopDesc::unsafe_equals(obj1, obj2);
 273   if (! eq && ShenandoahAcmpBarrier) {
 274     OrderAccess::loadload();
 275     obj1 = resolve_forwarded(obj1);
 276     obj2 = resolve_forwarded(obj2);
 277     eq = oopDesc::unsafe_equals(obj1, obj2);
 278   }
 279   return eq;
 280 }
 281 
// Compiled-code (JRT) leaf entry for the write barrier slow path.
JRT_LEAF(oopDesc*, ShenandoahBarrierSet::write_barrier_JRT(oopDesc* src))
  oop result = ((ShenandoahBarrierSet*)BarrierSet::barrier_set())->write_barrier(src);
  return (oopDesc*) result;
JRT_END
 286 
// Interpreter (IRT) leaf entry for the write barrier slow path.
IRT_LEAF(oopDesc*, ShenandoahBarrierSet::write_barrier_IRT(oopDesc* src))
  oop result = ((ShenandoahBarrierSet*)BarrierSet::barrier_set())->write_barrier(src);
  return (oopDesc*) result;
IRT_END
 291 
// Write barrier slow path: make sure the caller gets a to-space copy it is
// allowed to write to. If evacuation (evac/partial/traversal) is running and
// the object sits unforwarded in the collection set, evacuate it here;
// otherwise return the existing forwardee (or the object itself).
oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
  assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier), "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        oopDesc::unsafe_equals(obj, fwd)) {
      // Not yet forwarded: evacuate under OOM-during-evac protection.
      ShenandoahEvacOOMScope oom_evac_scope;
      bool evac;
      oop copy = _heap->evacuate_object(obj, Thread::current(), evac);
      // Partial GC must also SATB-enqueue copies this thread created.
      if (evac && _heap->is_concurrent_partial_in_progress()) {
        enqueue(copy);
      }
      return copy;
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}
 314 
 315 oop ShenandoahBarrierSet::write_barrier(oop obj) {
 316   if (ShenandoahWriteBarrier) {
 317     return write_barrier_impl(obj);
 318   } else {
 319     return obj;
 320   }
 321 }
 322 
// Store-value barrier applied to the value being stored into the heap.
// The three flag-controlled steps run in this order:
//  1. write-barrier the value (StoreValWriteBarrier / StoreValEnqueueBarrier),
//  2. SATB-enqueue the (non-null) value (StoreValEnqueueBarrier),
//  3. resolve a possible forwardee (StoreValReadBarrier).
oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
  if (ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
    obj = write_barrier(obj);
  }
  if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj)) {
    enqueue(obj);
  }
  if (ShenandoahStoreValReadBarrier) {
    obj = resolve_forwarded(obj);
  }
  return obj;
}
 335 
 336 void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
 337   if (ShenandoahKeepAliveBarrier) {
 338     if (_heap->is_concurrent_mark_in_progress()) {
 339       enqueue(obj);
 340     } else if (_heap->is_concurrent_partial_in_progress()) {
 341       write_barrier_impl(obj);
 342     }
 343   }
 344 }
 345 
// Push an object onto the SATB mark queue: the thread-local queue for Java
// threads, or the shared queue (under its lock) for VM/other threads. No-op
// when the queue set is inactive (marking not running).
void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
  // Nulls should have been already filtered.
  assert(oopDesc::is_oop(obj, true), "Error");

  if (!_satb_mark_queue_set.is_active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    ShenandoahThreadLocalData::satb_mark_queue(thr).enqueue(obj);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    _satb_mark_queue_set.shared_satb_queue()->enqueue(obj);
  }
}
 360 
 361 #ifdef ASSERT
// Debug-only check: a "safe" oop must not be in the collection set, unless it
// is NULL or concurrent GC was cancelled.
void ShenandoahBarrierSet::verify_safe_oop(oop p) {
  shenandoah_assert_not_in_cset_except(NULL, p, (p == NULL) || ShenandoahHeap::heap()->cancelled_concgc());
}
 365 #endif
 366 
// Allocate Shenandoah's per-thread GC data when a thread object is created.
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}
 371 
// Release Shenandoah's per-thread GC data when a thread object is destroyed.
void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}
 376 
 377 
// Called when a Java thread attaches: activate its SATB queue if marking is
// running, and publish the current GC state into its thread-local copy.
void ShenandoahBarrierSet::on_thread_attach(JavaThread* thread) {
  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
  assert(!ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "SATB queue should not be active");
  assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_empty(), "SATB queue should be empty");
  // Mirror the global queue-set activation into the new thread's queue.
  if (ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
    ShenandoahThreadLocalData::satb_mark_queue(thread).set_active(true);
  }
  ShenandoahThreadLocalData::set_gc_state(thread, ShenandoahHeap::heap()->gc_state());
}
 387 
// Called when a Java thread detaches: hand its pending SATB entries to the
// global set and retire its GCLAB so the heap stays parsable.
void ShenandoahBarrierSet::on_thread_detach(JavaThread* thread) {
  ShenandoahThreadLocalData::satb_mark_queue(thread).flush();
  if (UseTLAB && thread->gclab().is_initialized()) {
    thread->gclab().make_parsable(true);
  }
}