1 /*
   2  * Copyright (c) 2013, 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/g1/g1BarrierSet.hpp"
  26 #include "gc/shenandoah/shenandoahAsserts.hpp"
  27 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  29 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  30 #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "runtime/interfaceSupport.inline.hpp"
  33 
// Process-wide SATB mark queue set shared by all threads.
ShenandoahSATBMarkQueueSet ShenandoahBarrierSet::_satb_mark_queue_set;
  35 
// Closure that fixes up (possibly forwarded) reference fields in place while
// iterating an object. Compile-time flags select the variant:
//  - STOREVAL_WRITE_BARRIER: use evac_update_with_forwarded() (may evacuate)
//    and SATB-enqueue every non-null result (traversal-GC mode);
//  - UPDATE_MATRIX: additionally record the (location, target) edge in the
//    connection matrix.
template <bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER>
class ShenandoahUpdateRefsForOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  template <class T>
  inline void do_oop_work(T* p) {
    oop o;
    if (STOREVAL_WRITE_BARRIER) {
      // May evacuate the referent; the updated value must be kept alive by marking.
      o = _heap->evac_update_with_forwarded(p);
      if (!CompressedOops::is_null(o)) {
        ShenandoahBarrierSet::enqueue(o);
      }
    } else {
      // Only rewrites the slot if the referent is already forwarded.
      o = _heap->maybe_update_with_forwarded(p);
    }
    if (UPDATE_MATRIX && !CompressedOops::is_null(o)) {
      _heap->connection_matrix()->set_connected(p, o);
    }
  }
public:
  ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()) {
    assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  }
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
  62 
// Construct the barrier set with its platform-specific assembler stubs and
// register it under the Shenandoah FakeRtti tag so is_a()/barrier_set_cast work.
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             BarrierSet::FakeRtti(BarrierSet::Shenandoah)),
  _heap(heap)
{
}
  69 
// Identify this barrier set in diagnostic output.
void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}
  73 
// RTTI check: this barrier set only matches its own tag.
bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::Shenandoah;
}
  77 
// Shenandoah imposes no card-table-style alignment constraints, so every
// heap word is considered aligned.
bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}
  81 
// There is no covered-region structure to resize; reaching this is an error.
void ShenandoahBarrierSet::resize_covered_region(MemRegion mr) {
  Unimplemented();
}
  85 
  86 bool ShenandoahBarrierSet::need_update_refs_barrier() {
  87   if (UseShenandoahMatrix || _heap->is_concurrent_traversal_in_progress()) {
  88     return true;
  89   }
  90   if (_heap->shenandoahPolicy()->update_refs()) {
  91     return _heap->is_update_refs_in_progress();
  92   } else {
  93     return _heap->is_concurrent_mark_in_progress() && _heap->has_forwarded_objects();
  94   }
  95 }
  96 
// Array stores are handled by write_ref_array() below; this generic path
// must never be taken.
void ShenandoahBarrierSet::write_ref_array_work(MemRegion r) {
  ShouldNotReachHere();
}
 100 
 101 template <class T, bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER>
 102 void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
 103   assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
 104   ShenandoahUpdateRefsForOopClosure<UPDATE_MATRIX, STOREVAL_WRITE_BARRIER> cl;
 105   ShenandoahEvacOOMScope oom_evac_scope;
 106   T* dst = (T*) start;
 107   for (size_t i = 0; i < count; i++) {
 108     cl.do_oop(dst++);
 109   }
 110 }
 111 
 112 void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
 113   assert(UseShenandoahGC, "should be enabled");
 114   if (!ShenandoahCloneBarrier) return;
 115   if (!need_update_refs_barrier()) return;
 116 
 117   if (UseShenandoahMatrix) {
 118     if (_heap->is_concurrent_traversal_in_progress()) {
 119       if (UseCompressedOops) {
 120         write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ true>(start, count);
 121       } else {
 122         write_ref_array_loop<oop,       /* matrix = */ true, /* wb = */ true>(start, count);
 123       }
 124     } else {
 125       if (UseCompressedOops) {
 126         write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ false>(start, count);
 127       } else {
 128         write_ref_array_loop<oop,       /* matrix = */ true, /* wb = */ false>(start, count);
 129       }
 130     }
 131   } else {
 132     if (_heap->is_concurrent_traversal_in_progress()) {
 133       if (UseCompressedOops) {
 134         write_ref_array_loop<narrowOop,   /* matrix = */ false, /* wb = */ true>(start, count);
 135       } else {
 136         write_ref_array_loop<oop,         /* matrix = */ false, /* wb = */ true>(start, count);
 137       }
 138     } else {
 139       if (UseCompressedOops) {
 140         write_ref_array_loop<narrowOop,   /* matrix = */ false, /* wb = */ false>(start, count);
 141       } else {
 142         write_ref_array_loop<oop,         /* matrix = */ false, /* wb = */ false>(start, count);
 143       }
 144     }
 145   }
 146 }
 147 
 148 void ShenandoahBarrierSet::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
 149   ShenandoahBarrierSet *bs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
 150   bs->write_ref_array_pre(dst, length, false);
 151 }
 152 
 153 void ShenandoahBarrierSet::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
 154   ShenandoahBarrierSet *bs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
 155   bs->write_ref_array_pre(dst, length, false);
 156 }
 157 
 158 void ShenandoahBarrierSet::write_ref_array_post_entry(HeapWord* dst, size_t length) {
 159   ShenandoahBarrierSet *bs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
 160   bs->ShenandoahBarrierSet::write_ref_array(dst, length);
 161 }
 162 
 163 
 164 template <class T>
 165 void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, int count) {
 166   shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_concgc());
 167   if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
 168     T* elem_ptr = dst;
 169     for (int i = 0; i < count; i++, elem_ptr++) {
 170       T heap_oop = RawAccess<>::oop_load(elem_ptr);
 171       if (!CompressedOops::is_null(heap_oop)) {
 172         enqueue(CompressedOops::decode_not_null(heap_oop));
 173       }
 174     }
 175   }
 176 }
 177 
 178 void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
 179   if (! dest_uninitialized) {
 180     write_ref_array_pre_work(dst, count);
 181   }
 182 }
 183 
 184 void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
 185   if (! dest_uninitialized) {
 186     write_ref_array_pre_work(dst, count);
 187   }
 188 }
 189 
// Shared implementation of the field pre-write barrier:
//  1. during concurrent mark, SATB-enqueue the previous (to-be-overwritten)
//     value so it is not lost by the snapshot-at-the-beginning marking;
//  2. with the connection matrix enabled, record the edge from the field's
//     location to the new value.
template <class T>
inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_concgc());
  if (_heap->is_concurrent_mark_in_progress()) {
    T heap_oop = RawAccess<>::oop_load(field);
    if (!CompressedOops::is_null(heap_oop)) {
      enqueue(CompressedOops::decode(heap_oop));
    }
  }
  if (UseShenandoahMatrix && ! CompressedOops::is_null(new_val)) {
    ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
    matrix->set_connected(field, new_val);
  }
}
 204 
 205 // These are the more general virtual versions.
// Virtual oop* variant: delegates to the shared inline implementation.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 209 
// Virtual narrowOop* variant: delegates to the shared inline implementation.
void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 213 
// Untyped variant is not used by Shenandoah; fail loudly if it is ever reached.
void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}
 217 
// Post-write field barrier: no actual work is required; in debug builds,
// sanity-check the store — the location must not be in the collection set,
// and during concurrent mark the stored value must be neither forwarded nor
// in the collection set.
void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_concgc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_concgc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_concgc() || !_heap->is_concurrent_mark_in_progress());
}
 223 
 224 void ShenandoahBarrierSet::write_region(MemRegion mr) {
 225   assert(UseShenandoahGC, "should be enabled");
 226   if (!ShenandoahCloneBarrier) return;
 227   if (! need_update_refs_barrier()) return;
 228 
 229   // This is called for cloning an object (see jvm.cpp) after the clone
 230   // has been made. We are not interested in any 'previous value' because
 231   // it would be NULL in any case. But we *are* interested in any oop*
 232   // that potentially need to be updated.
 233 
 234   ShenandoahEvacOOMScope oom_evac_scope;
 235   oop obj = oop(mr.start());
 236   assert(oopDesc::is_oop(obj), "must be an oop");
 237   if (UseShenandoahMatrix) {
 238     if (_heap->is_concurrent_traversal_in_progress()) {
 239       ShenandoahUpdateRefsForOopClosure<true, true> cl;
 240       obj->oop_iterate(&cl);
 241     } else {
 242       ShenandoahUpdateRefsForOopClosure<true, false> cl;
 243       obj->oop_iterate(&cl);
 244     }
 245   } else {
 246     if (_heap->is_concurrent_traversal_in_progress()) {
 247       ShenandoahUpdateRefsForOopClosure<false, true> cl;
 248       obj->oop_iterate(&cl);
 249     } else {
 250       ShenandoahUpdateRefsForOopClosure<false, false> cl;
 251       obj->oop_iterate(&cl);
 252     }
 253   }
 254 }
 255 
 256 oop ShenandoahBarrierSet::read_barrier(oop src) {
 257   // Check for forwarded objects, because on Full GC path we might deal with
 258   // non-trivial fwdptrs that contain Full GC specific metadata. We could check
 259   // for is_full_gc_in_progress(), but this also covers the case of stable heap,
 260   // which provides a bit of performance improvement.
 261   if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
 262     return ShenandoahBarrierSet::resolve_forwarded(src);
 263   } else {
 264     return src;
 265   }
 266 }
 267 
// acmp barrier: raw equality may fail spuriously when one operand has been
// evacuated (compared against a stale copy). On mismatch, re-resolve both
// operands through their forwarding pointers and compare again. The loadload
// fence orders the forwarding-pointer loads after the initial comparison.
bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
  bool eq = oopDesc::unsafe_equals(obj1, obj2);
  if (! eq && ShenandoahAcmpBarrier) {
    OrderAccess::loadload();
    obj1 = resolve_forwarded(obj1);
    obj2 = resolve_forwarded(obj2);
    eq = oopDesc::unsafe_equals(obj1, obj2);
  }
  return eq;
}
 278 
// Compiled-code (JRT) leaf entry for the write barrier slow path.
JRT_LEAF(oopDesc*, ShenandoahBarrierSet::write_barrier_JRT(oopDesc* src))
  oop result = ((ShenandoahBarrierSet*)BarrierSet::barrier_set())->write_barrier(src);
  return (oopDesc*) result;
JRT_END
 283 
// Interpreter (IRT) leaf entry for the write barrier slow path.
IRT_LEAF(oopDesc*, ShenandoahBarrierSet::write_barrier_IRT(oopDesc* src))
  oop result = ((ShenandoahBarrierSet*)BarrierSet::barrier_set())->write_barrier(src);
  return (oopDesc*) result;
IRT_END
 288 
// Write barrier slow path: return a copy of obj that is safe to mutate.
// If evacuation or traversal is in progress and obj is an unforwarded object
// in the collection set, evacuate it now (inside an evac-OOM scope);
// otherwise return its already-resolved forwardee (obj itself when it is
// not forwarded). Null passes through unchanged.
oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        oopDesc::unsafe_equals(obj, fwd)) {
      ShenandoahEvacOOMScope oom_evac_scope;
      return _heap->evacuate_object(obj, Thread::current());
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}
 306 
 307 oop ShenandoahBarrierSet::write_barrier(oop obj) {
 308   if (ShenandoahWriteBarrier) {
 309     return write_barrier_impl(obj);
 310   } else {
 311     return obj;
 312   }
 313 }
 314 
 315 oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
 316   if (ShenandoahStoreValEnqueueBarrier) {
 317     if (!CompressedOops::is_null(obj)) {
 318       obj = write_barrier(obj);
 319       enqueue(obj);
 320     }
 321   }
 322   if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj)) {
 323     enqueue(obj);
 324   }
 325   if (ShenandoahStoreValReadBarrier) {
 326     obj = resolve_forwarded(obj);
 327   }
 328   return obj;
 329 }
 330 
 331 void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
 332   if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
 333     enqueue(obj);
 334   }
 335 }
 336 
// Push obj onto a SATB mark queue: Java threads use their thread-local
// queue; all other threads share one queue guarded by Shared_SATB_Q_lock.
// No-op when the queue set is inactive (no concurrent mark running).
void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
  // Nulls should have been already filtered.
  assert(oopDesc::is_oop(obj, true), "Error");

  if (!_satb_mark_queue_set.is_active()) return;
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    ShenandoahThreadLocalData::satb_mark_queue(thr).enqueue(obj);
  } else {
    // Shared queue requires the lock; no safepoint check since callers may
    // already be in sensitive GC code.
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    _satb_mark_queue_set.shared_satb_queue()->enqueue(obj);
  }
}
 351 
 352 #ifdef ASSERT
// Debug-only check: a "safe" oop must not point into the collection set
// (unless it is NULL or the concurrent GC has been cancelled).
void ShenandoahBarrierSet::verify_safe_oop(oop p) {
  shenandoah_assert_not_in_cset_except(NULL, p, (p == NULL) || ShenandoahHeap::heap()->cancelled_concgc());
}
 356 #endif
 357 
// Allocate the Shenandoah-specific thread-local data for a new thread.
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}
 362 
// Release the Shenandoah-specific thread-local data when a thread dies.
void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}
 367 
 368 
// Wire a newly attaching Java thread into the GC machinery: pick up the
// global SATB activation state (so a thread attaching mid-mark starts
// enqueueing immediately) and snapshot the current GC state into the
// thread-local copy consulted by the barriers.
void ShenandoahBarrierSet::on_thread_attach(JavaThread* thread) {
  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
  assert(!ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "SATB queue should not be active");
  assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_empty(), "SATB queue should be empty");
  if (ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
    ShenandoahThreadLocalData::satb_mark_queue(thread).set_active(true);
  }
  ShenandoahThreadLocalData::set_gc_state(thread, ShenandoahHeap::heap()->gc_state());
}
 378 
// Unwire a detaching Java thread: flush its pending SATB entries to the
// global queue set, and make its GCLAB parsable (the 'true' argument
// presumably retires the lab — confirm against ThreadLocalAllocBuffer).
void ShenandoahBarrierSet::on_thread_detach(JavaThread* thread) {
  ShenandoahThreadLocalData::satb_mark_queue(thread).flush();
  if (UseTLAB && thread->gclab().is_initialized()) {
    thread->gclab().make_parsable(true);
  }
}