/*
 * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahBaseBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"

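// Fixes up the references held by a single object. Each reference is updated to
// point past the forwarding pointer of an evacuated referent. When
// STOREVAL_WRITE_BARRIER is set (used during concurrent traversal), referents
// are also evacuated on the fly, and the updated values are enqueued for SATB
// marking.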
template <bool STOREVAL_WRITE_BARRIER>
class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahBaseBarrierSet* _bs;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o;
    if (STOREVAL_WRITE_BARRIER) {
      o = _heap->evac_update_with_forwarded(p);
      if (!CompressedOops::is_null(o)) {
        _bs->enqueue(o);
      }
    } else {
      _heap->maybe_update_with_forwarded(p);
    }
  }
public:
  ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()), _bs(ShenandoahBaseBarrierSet::barrier_set()) {
    assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  }

  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

ShenandoahBaseBarrierSet::ShenandoahBaseBarrierSet(BarrierSetAssembler* bsasm,
                                                   BarrierSetC1* bsc1,
                                                   BarrierSetC2* bsc2,
                                                   const FakeRtti& fake_rtti,
                                                   ShenandoahHeap* heap) :
  BarrierSet(bsasm, bsc1, bsc2, fake_rtti),
  _satb_mark_queue_set(),
  _heap(heap)
{
}

ShenandoahBaseBarrierSetAssembler* ShenandoahBaseBarrierSet::assembler() {
  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  return reinterpret_cast<ShenandoahBaseBarrierSetAssembler*>(bsa);
}

bool ShenandoahBaseBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}

void ShenandoahBaseBarrierSet::resize_covered_region(MemRegion mr) {
  Unimplemented();
}

void ShenandoahBaseBarrierSet::write_ref_array_work(MemRegion r) {
  ShouldNotReachHere();
}

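// Applies the update closure to 'count' consecutive T-slots starting at 'start'.
// Instantiated for both narrowOop and oop, with and without the storeval write
// barrier.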
template <class T, bool STOREVAL_WRITE_BARRIER>
void ShenandoahBaseBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
  assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  ShenandoahUpdateRefsForOopClosure<STOREVAL_WRITE_BARRIER> cl;
  T* dst = (T*) start;
  for (size_t i = 0; i < count; i++) {
    cl.do_oop(dst++);
  }
}

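// Post-barrier for bulk reference copies (e.g. arraycopy): re-visits the freshly
// written destination slots. During concurrent traversal this uses the storeval
// variant, so copied referents are evacuated and SATB-enqueued as well.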
void ShenandoahBaseBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(UseShenandoahGC, "should be enabled");
  if (count == 0) return;
  if (!ShenandoahCloneBarrier) return;

  if (!need_update_refs_barrier()) return;

  if (_heap->is_concurrent_traversal_in_progress()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop, /* wb = */ true>(start, count);
    } else {
      write_ref_array_loop<oop,       /* wb = */ true>(start, count);
    }
  } else {
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop, /* wb = */ false>(start, count);
    } else {
      write_ref_array_loop<oop,       /* wb = */ false>(start, count);
    }
  }
}

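// SATB pre-barrier for array stores: while concurrent marking is running, the
// about-to-be-overwritten references are enqueued, preserving the
// snapshot-at-the-beginning invariant.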
template <class T>
void ShenandoahBaseBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
  shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
    T* elem_ptr = dst;
    for (size_t i = 0; i < count; i++, elem_ptr++) {
      T heap_oop = RawAccess<>::oop_load(elem_ptr);
      if (!CompressedOops::is_null(heap_oop)) {
        enqueue(CompressedOops::decode_not_null(heap_oop));
      }
    }
  }
}

void ShenandoahBaseBarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void ShenandoahBaseBarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

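// SATB pre-barrier for a single reference field: enqueues the previous value of
// the field while concurrent marking is in progress.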
template <class T>
inline void ShenandoahBaseBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
  if (_heap->is_concurrent_mark_in_progress()) {
    T heap_oop = RawAccess<>::oop_load(field);
    if (!CompressedOops::is_null(heap_oop)) {
      enqueue(CompressedOops::decode(heap_oop));
    }
  }
}

// These are the more general virtual versions.
void ShenandoahBaseBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBaseBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBaseBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}

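// Shenandoah requires no post-write barrier for reference field stores; in debug
// builds this hook only verifies that the store does not violate heap invariants.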
void ShenandoahBaseBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
}

void ShenandoahBaseBarrierSet::write_region(MemRegion mr) {
  assert(UseShenandoahGC, "should be enabled");
  if (!ShenandoahCloneBarrier) return;
  if (!need_update_refs_barrier()) return;

  // This is called for cloning an object (see jvm.cpp) after the clone
  // has been made. We are not interested in any 'previous value', because
  // it would be NULL in any case. But we *are* interested in any oop*
  // fields that potentially need to be updated.

  oop obj = oop(mr.start());
  shenandoah_assert_correct(NULL, obj);
  if (_heap->is_concurrent_traversal_in_progress()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahUpdateRefsForOopClosure</* wb = */ true> cl;
    obj->oop_iterate(&cl);
  } else {
    ShenandoahUpdateRefsForOopClosure</* wb = */ false> cl;
    obj->oop_iterate(&cl);
  }
}

oop ShenandoahBaseBarrierSet::read_barrier(oop src) {
  // Check for forwarded objects, because on the Full GC path we might deal with
  // non-trivial fwdptrs that contain Full GC specific metadata. We could check
  // for is_full_gc_in_progress(), but this also covers the case of a stable heap,
  // which provides a bit of performance improvement.
  if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
    return resolve_forwarded(src);
  } else {
    return src;
  }
}

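// acmp barrier: a from-space copy and its to-space copy denote the same object,
// so a failed raw comparison must be retried on the forwardees. The loadload
// fence presumably keeps the forwarding pointer loads from floating above the
// initial comparison.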
bool ShenandoahBaseBarrierSet::obj_equals(oop obj1, oop obj2) {
  bool eq = oopDesc::unsafe_equals(obj1, obj2);
  if (!eq && ShenandoahAcmpBarrier) {
    OrderAccess::loadload();
    obj1 = resolve_forwarded(obj1);
    obj2 = resolve_forwarded(obj2);
    eq = oopDesc::unsafe_equals(obj1, obj2);
  }
  return eq;
}

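// Write barrier slow path for mutators: called (presumably from the runtime stubs
// emitted by the barrier set assembler) when a mutator hits a not-yet-forwarded
// object in the collection set during evacuation; returns the to-space copy.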
oop ShenandoahBaseBarrierSet::write_barrier_mutator(oop obj) {
  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
  assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
  shenandoah_assert_in_cset(NULL, obj);

  oop fwd = resolve_forwarded_not_null(obj);
  if (oopDesc::unsafe_equals(obj, fwd)) {
    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and have paid the price of getting through the
    // runtime call adapters and acquiring the oom-scope, it makes sense to try to
    // evacuate more adjacent objects, thus amortizing the overhead. For sparsely
    // live heaps, scan costs easily dominate total assist costs, and can introduce
    // a lot of evacuation latency. This is why we only scan for the _nearest_ N
    // objects, regardless of whether they are eligible for evacuation. The scan
    // itself should also avoid touching non-marked objects below TAMS, because
    // their metadata (notably, klasses) may already be incorrect.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses the incomplete marking context, because it coalesces evac with mark.
      // Other code uses the complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
                                      _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      HeapWord* cur = (HeapWord*)obj + obj->size() + BrooksPointer::word_size();

      size_t count = 0;
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        if (oopDesc::unsafe_equals(cur_oop, resolve_forwarded_not_null(cur_oop))) {
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size() + BrooksPointer::word_size();
      }
    }

    return res_oop;
  }
  return fwd;
}

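// Runtime write barrier: returns the to-space copy of obj, evacuating it first
// if evacuation is in progress, obj is in the collection set, and no copy
// exists yet.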
oop ShenandoahBaseBarrierSet::write_barrier_impl(oop obj) {
  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        oopDesc::unsafe_equals(obj, fwd)) {
      Thread* t = Thread::current();
      if (t->is_GC_task_thread()) {
        return _heap->evacuate_object(obj, t);
      } else {
        ShenandoahEvacOOMScope oom_evac_scope;
        return _heap->evacuate_object(obj, t);
      }
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}

oop ShenandoahBaseBarrierSet::write_barrier(oop obj) {
  if (ShenandoahWriteBarrier) {
    return write_barrier_impl(obj);
  } else {
    return obj;
  }
}

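// Storeval barrier: makes a value about to be stored into the heap safe for the
// current phase, either by write-barrier-plus-SATB-enqueue or by resolving the
// forwardee, depending on which storeval flavor is enabled.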
oop ShenandoahBaseBarrierSet::storeval_barrier(oop obj) {
  if (ShenandoahStoreValEnqueueBarrier) {
    if (!CompressedOops::is_null(obj)) {
      obj = write_barrier(obj);
      enqueue_barrier(obj);
    }
  }
  if (ShenandoahStoreValReadBarrier) {
    obj = resolve_forwarded(obj);
  }
  return obj;
}

void ShenandoahBaseBarrierSet::enqueue_barrier(oop obj) {
  if (ShenandoahStoreValEnqueueBarrier && obj != NULL) {
    enqueue(obj);
  }
}

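// Keep-alive barrier for weak reference loads (e.g. Reference.get()): a referent
// handed out during concurrent marking must be enqueued, or marking could treat
// it as dead.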
void ShenandoahBaseBarrierSet::keep_alive_barrier(oop obj) {
  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
    enqueue(obj);
  }
}

void ShenandoahBaseBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
  if (!_satb_mark_queue_set.is_active()) return;

  // Filter marked objects before hitting the SATB queues. The same predicate would
  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
  // filtering here helps to avoid wasteful SATB queueing work to begin with.
  if (!_heap->requires_marking(obj)) return;

  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    ShenandoahThreadLocalData::satb_mark_queue(thr).enqueue(obj);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    _satb_mark_queue_set.shared_satb_queue()->enqueue(obj);
  }
}

void ShenandoahBaseBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}

void ShenandoahBaseBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}

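// A newly attached thread picks up the current SATB queue activity and GC state,
// and gets a fresh GCLAB.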
void ShenandoahBaseBarrierSet::on_thread_attach(JavaThread* thread) {
  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
  assert(!ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "SATB queue should not be active");
  assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_empty(), "SATB queue should be empty");
  if (_satb_mark_queue_set.is_active()) {
    ShenandoahThreadLocalData::satb_mark_queue(thread).set_active(true);
  }
  ShenandoahThreadLocalData::set_gc_state(thread, ShenandoahHeap::heap()->gc_state());
  ShenandoahThreadLocalData::initialize_gclab(thread);
}

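// A detaching thread publishes its pending SATB entries and retires its GCLAB.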
void ShenandoahBaseBarrierSet::on_thread_detach(JavaThread* thread) {
  ShenandoahThreadLocalData::satb_mark_queue(thread).flush();
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab != NULL) {
    gclab->retire();
  }
}