/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#ifdef COMPILER1
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;

ShenandoahSATBMarkQueueSet ShenandoahBarrierSet::_satb_mark_queue_set;

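// Closure used after bulk copies (clone, arraycopy) to fix up the references in
// the destination object. With STOREVAL_WRITE_BARRIER it evacuates/updates each
// reference and enqueues the result for the concurrent collector; otherwise it
// only updates references to already-forwarded objects. With UPDATE_MATRIX it
// additionally records the connection in the connection matrix.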
template <bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER>
class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahBarrierSet* _bs;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o;
    if (STOREVAL_WRITE_BARRIER) {
      o = _heap->evac_update_with_forwarded(p);
      if (!CompressedOops::is_null(o)) {
        _bs->enqueue(o);
      }
    } else {
      o = _heap->maybe_update_with_forwarded(p);
    }
    if (UPDATE_MATRIX && !CompressedOops::is_null(o)) {
      _heap->connection_matrix()->set_connected(p, o);
    }
  }
public:
  ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()), _bs(ShenandoahBarrierSet::barrier_set()) {
    assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  }

  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             BarrierSet::FakeRtti(BarrierSet::Shenandoah)),
  _heap(heap)
{
}

void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}

bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::Shenandoah;
}

bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}

void ShenandoahBarrierSet::resize_covered_region(MemRegion mr) {
  Unimplemented();
}

void ShenandoahBarrierSet::write_ref_array_work(MemRegion r) {
  ShouldNotReachHere();
}

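// Applies the update closure to 'count' reference slots starting at 'start'.
// T selects between narrowOop and oop slot layouts.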
template <class T, bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER>
void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
  assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  ShenandoahUpdateRefsForOopClosure<UPDATE_MATRIX, STOREVAL_WRITE_BARRIER> cl;
  T* dst = (T*) start;
  for (size_t i = 0; i < count; i++) {
    cl.do_oop(dst++);
  }
}

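// Post-barrier for bulk reference stores (e.g. arraycopy). During concurrent
// traversal, large copies are handed off to the traversal GC and smaller ones
// are fixed up inline with the storeval write barrier; otherwise the copied
// slots only have their forwarded references updated.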
void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(UseShenandoahGC, "should be enabled");
  if (count == 0) return;
  if (!ShenandoahCloneBarrier) return;

  if (!need_update_refs_barrier()) return;

  if (_heap->is_concurrent_traversal_in_progress()) {
    if (count > ShenandoahEnqueueArrayCopyThreshold) {
      _heap->traversal_gc()->push_arraycopy(start, count);
    } else {
      ShenandoahEvacOOMScope oom_evac_scope;
      if (UseShenandoahMatrix) {
        if (UseCompressedOops) {
          write_ref_array_loop<narrowOop, /* matrix = */ true,  /* wb = */ true>(start, count);
        } else {
          write_ref_array_loop<oop,       /* matrix = */ true,  /* wb = */ true>(start, count);
        }
      } else {
        if (UseCompressedOops) {
          write_ref_array_loop<narrowOop, /* matrix = */ false, /* wb = */ true>(start, count);
        } else {
          write_ref_array_loop<oop,       /* matrix = */ false, /* wb = */ true>(start, count);
        }
      }
    }
  } else {
    if (UseShenandoahMatrix) {
      if (UseCompressedOops) {
        write_ref_array_loop<narrowOop, /* matrix = */ true,  /* wb = */ false>(start, count);
      } else {
        write_ref_array_loop<oop,       /* matrix = */ true,  /* wb = */ false>(start, count);
      }
    } else {
      if (UseCompressedOops) {
        write_ref_array_loop<narrowOop, /* matrix = */ false, /* wb = */ false>(start, count);
      } else {
        write_ref_array_loop<oop,       /* matrix = */ false, /* wb = */ false>(start, count);
      }
    }
  }
}

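// SATB pre-barrier for bulk reference stores: while concurrent marking is in
// progress, enqueue the previous values of the overwritten slots to preserve
// the snapshot-at-the-beginning invariant.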
template <class T>
void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
  shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
    T* elem_ptr = dst;
    for (size_t i = 0; i < count; i++, elem_ptr++) {
      T heap_oop = RawAccess<>::oop_load(elem_ptr);
      if (!CompressedOops::is_null(heap_oop)) {
        enqueue(CompressedOops::decode_not_null(heap_oop));
      }
    }
  }
}

void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

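// Field-store pre-barrier: enqueues the previous value for SATB marking while
// concurrent mark is running, and records the new reference in the connection
// matrix when the matrix is enabled.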
template <class T>
inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
  if (_heap->is_concurrent_mark_in_progress()) {
    T heap_oop = RawAccess<>::oop_load(field);
    if (!CompressedOops::is_null(heap_oop)) {
      enqueue(CompressedOops::decode(heap_oop));
    }
  }
  if (UseShenandoahMatrix && !CompressedOops::is_null(new_val)) {
    ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
    matrix->set_connected(field, new_val);
  }
}

// These are the more general virtual versions.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}

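// Shenandoah needs no post-write barrier for individual field stores; this hook
// only asserts that neither the location nor the stored value violate the
// current collection invariants.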
void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
}

void ShenandoahBarrierSet::write_region(MemRegion mr) {
  assert(UseShenandoahGC, "should be enabled");
  if (!ShenandoahCloneBarrier) return;
  if (!need_update_refs_barrier()) return;

  // This is called for cloning an object (see jvm.cpp) after the clone
  // has been made. We are not interested in any 'previous value' because
  // it would be NULL in any case. But we *are* interested in any oop*
  // that potentially needs to be updated.

  oop obj = oop(mr.start());
  assert(oopDesc::is_oop(obj), "must be an oop");
  if (_heap->is_concurrent_traversal_in_progress()) {
    if ((size_t) obj->size() > ShenandoahEnqueueArrayCopyThreshold) {
      _heap->traversal_gc()->push_arraycopy(mr.start(), 0);
    } else {
      ShenandoahEvacOOMScope oom_evac_scope;
      if (UseShenandoahMatrix) {
        ShenandoahUpdateRefsForOopClosure</* matrix = */ true,  /* wb = */ true> cl;
        obj->oop_iterate(&cl);
      } else {
        ShenandoahUpdateRefsForOopClosure</* matrix = */ false, /* wb = */ true> cl;
        obj->oop_iterate(&cl);
      }
    }
  } else {
    if (UseShenandoahMatrix) {
      ShenandoahUpdateRefsForOopClosure</* matrix = */ true,  /* wb = */ false> cl;
      obj->oop_iterate(&cl);
    } else {
      ShenandoahUpdateRefsForOopClosure</* matrix = */ false, /* wb = */ false> cl;
      obj->oop_iterate(&cl);
    }
  }
}

oop ShenandoahBarrierSet::read_barrier(oop src) {
  // Check for forwarded objects, because on Full GC path we might deal with
  // non-trivial fwdptrs that contain Full GC specific metadata. We could check
  // for is_full_gc_in_progress(), but this also covers the case of stable heap,
  // which provides a bit of performance improvement.
  if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
    return ShenandoahBarrierSet::resolve_forwarded(src);
  } else {
    return src;
  }
}

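// acmp barrier: a raw comparison may fail spuriously when one operand is a
// stale from-space copy, so on inequality both operands are resolved to their
// forwardees and compared again.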
bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
  bool eq = oopDesc::unsafe_equals(obj1, obj2);
  if (!eq && ShenandoahAcmpBarrier) {
    OrderAccess::loadload();
    obj1 = resolve_forwarded(obj1);
    obj2 = resolve_forwarded(obj2);
    eq = oopDesc::unsafe_equals(obj1, obj2);
  }
  return eq;
}

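// Write barrier slow path: if the object is in the collection set and not yet
// evacuated while evacuation or traversal is in progress, evacuate a copy;
// otherwise return the already-forwarded object.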
oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        oopDesc::unsafe_equals(obj, fwd)) {
      ShenandoahEvacOOMScope oom_evac_scope;
      return _heap->evacuate_object(obj, Thread::current());
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}

oop ShenandoahBarrierSet::write_barrier(oop obj) {
  if (ShenandoahWriteBarrier) {
    return write_barrier_impl(obj);
  } else {
    return obj;
  }
}

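// Makes a value safe to store into the heap: in enqueue mode the value is
// write-barriered and pushed onto the SATB queues; in read-barrier mode it is
// resolved to its forwardee.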
oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
  if (ShenandoahStoreValEnqueueBarrier) {
    if (!CompressedOops::is_null(obj)) {
      obj = write_barrier(obj);
      enqueue(obj);
    }
  }
  if (ShenandoahStoreValReadBarrier) {
    obj = resolve_forwarded(obj);
  }
  return obj;
}

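// Keep-alive barrier: while concurrent marking is running, enqueue the object
// so that a reference obtained through a weak load is not missed by SATB
// marking.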
void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
    enqueue(obj);
  }
}

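// Pushes an object onto the SATB queues. Inactive queue sets and already-marked
// objects are filtered out up front; Java threads use their thread-local queue,
// all other threads share a global queue under a lock.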
void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
  // Nulls should have been already filtered.
  assert(oopDesc::is_oop(obj, true), "Error");

  if (!_satb_mark_queue_set.is_active()) return;

  // Filter marked objects before hitting the SATB queues. The same predicate would
  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
  // filtering here helps to avoid wasteful SATB queueing work to begin with.
  if (!_heap->requires_marking(obj)) return;

  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    ShenandoahThreadLocalData::satb_mark_queue(thr).enqueue(obj);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    _satb_mark_queue_set.shared_satb_queue()->enqueue(obj);
  }
}

void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}

void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}

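// Thread attach/detach hooks: attaching threads pick up the current SATB
// activity and GC state and get a GCLAB; detaching threads flush their SATB
// queue and retire their GCLAB.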
void ShenandoahBarrierSet::on_thread_attach(JavaThread* thread) {
  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
  assert(!ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "SATB queue should not be active");
  assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_empty(), "SATB queue should be empty");
  if (ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
    ShenandoahThreadLocalData::satb_mark_queue(thread).set_active(true);
  }
  ShenandoahThreadLocalData::set_gc_state(thread, ShenandoahHeap::heap()->gc_state());
  ShenandoahThreadLocalData::initialize_gclab(thread);
}

void ShenandoahBarrierSet::on_thread_detach(JavaThread* thread) {
  ShenandoahThreadLocalData::satb_mark_queue(thread).flush();
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab != NULL) {
    gclab->flush_and_retire_stats(_heap->mutator_gclab_stats());
  }
}