/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#ifdef COMPILER1
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;

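// Closure that updates the references inside an object or array after a bulk
// copy. With STOREVAL_WRITE_BARRIER == true (used during concurrent traversal),
// each reference is evacuated/updated and the non-null result is enqueued for
// marking; otherwise the reference is only updated to point at the forwarded
// copy, if one exists.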
template <bool STOREVAL_WRITE_BARRIER>
class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahBarrierSet* _bs;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o;
    if (STOREVAL_WRITE_BARRIER) {
      o = _heap->evac_update_with_forwarded(p);
      if (!CompressedOops::is_null(o)) {
        _bs->enqueue(o);
      }
    } else {
      _heap->maybe_update_with_forwarded(p);
    }
  }
public:
  ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()), _bs(ShenandoahBarrierSet::barrier_set()) {
    assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  }

  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             NULL /* barrier_set_nmethod */,
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_set()
{
}

ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
}

void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}

bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::ShenandoahBarrierSet;
}

bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}

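// Visit every element in [start, start + count) with the update-refs closure.
// Instantiated for both narrowOop and oop, and for both storeval modes.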
template <class T, bool STOREVAL_WRITE_BARRIER>
void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
  assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  ShenandoahUpdateRefsForOopClosure<STOREVAL_WRITE_BARRIER> cl;
  T* dst = (T*) start;
  for (size_t i = 0; i < count; i++) {
    cl.do_oop(dst++);
  }
}

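// Array-range post-barrier: after a bulk array copy, make sure all copied
// references point to the forwarded copies of their objects. During concurrent
// traversal the storeval variant is used, which may evacuate and therefore
// needs the evac-OOM scope.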
void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(UseShenandoahGC, "should be enabled");
  if (count == 0) return;
  if (!ShenandoahCloneBarrier) return;

  if (!need_update_refs_barrier()) return;

  if (_heap->is_concurrent_traversal_in_progress()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop, /* wb = */ true>(start, count);
    } else {
      write_ref_array_loop<oop,       /* wb = */ true>(start, count);
    }
  } else {
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop, /* wb = */ false>(start, count);
    } else {
      write_ref_array_loop<oop,       /* wb = */ false>(start, count);
    }
  }
}

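// SATB pre-barrier for array stores: enqueue the previous values of the
// destination range, so that the snapshot-at-the-beginning invariant holds
// while concurrent marking is in progress.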
template <class T>
void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
  shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
  if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
    T* elem_ptr = dst;
    for (size_t i = 0; i < count; i++, elem_ptr++) {
      T heap_oop = RawAccess<>::oop_load(elem_ptr);
      if (!CompressedOops::is_null(heap_oop)) {
        enqueue(CompressedOops::decode_not_null(heap_oop));
      }
    }
  }
}

void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) {
  if (!dest_uninitialized) {
    write_ref_array_pre_work(dst, count);
  }
}

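// SATB pre-barrier for a single field store: enqueue the field's previous
// value while concurrent marking is in progress.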
template <class T>
inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
  if (_heap->is_concurrent_mark_in_progress()) {
    T heap_oop = RawAccess<>::oop_load(field);
    if (!CompressedOops::is_null(heap_oop)) {
      enqueue(CompressedOops::decode(heap_oop));
    }
  }
}

// These are the more general virtual versions.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}

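// Shenandoah does not need a post-write barrier for individual field stores;
// this hook only verifies that the store location and the stored value are
// sane for the current GC phase.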
void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
}

void ShenandoahBarrierSet::write_region(MemRegion mr) {
  assert(UseShenandoahGC, "should be enabled");
  if (!ShenandoahCloneBarrier) return;
  if (!need_update_refs_barrier()) return;

  // This is called for cloning an object (see jvm.cpp) after the clone
  // has been made. We are not interested in any 'previous value', because
  // it would be NULL in any case. But we *are* interested in any oop*
  // that potentially needs to be updated.

  oop obj = oop(mr.start());
  shenandoah_assert_correct(NULL, obj);
  if (_heap->is_concurrent_traversal_in_progress()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahUpdateRefsForOopClosure</* wb = */ true> cl;
    obj->oop_iterate(&cl);
  } else {
    ShenandoahUpdateRefsForOopClosure</* wb = */ false> cl;
    obj->oop_iterate(&cl);
  }
}

oop ShenandoahBarrierSet::read_barrier(oop src) {
  // Check for forwarded objects, because on the Full GC path we might deal with
  // non-trivial fwdptrs that contain Full GC specific metadata. We could check
  // for is_full_gc_in_progress() instead, but checking has_forwarded_objects()
  // also covers the stable-heap case, which provides a bit of a performance
  // improvement.
  if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
    return ShenandoahBarrierSet::resolve_forwarded(src);
  } else {
    return src;
  }
}

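// acmp barrier: two references may compare unequal even though they denote the
// same object, when one of them still points at the from-space copy. Retry the
// comparison on the fully resolved references before concluding inequality.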
bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
  bool eq = oopDesc::equals_raw(obj1, obj2);
  if (!eq && ShenandoahAcmpBarrier) {
    OrderAccess::loadload();
    obj1 = resolve_forwarded(obj1);
    obj2 = resolve_forwarded(obj2);
    eq = oopDesc::equals_raw(obj1, obj2);
  }
  return eq;
}

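// Write barrier slow path taken by mutator threads: the object is in the
// collection set; if it is not yet forwarded, the mutator evacuates it itself
// and returns the to-space copy.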
oop ShenandoahBarrierSet::write_barrier_mutator(oop obj) {
  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
  assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
  shenandoah_assert_in_cset(NULL, obj);

  oop fwd = resolve_forwarded_not_null(obj);
  if (oopDesc::equals_raw(obj, fwd)) {
    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and have paid the price of getting through the
    // runtime call adapters and acquiring the oom-scope, it makes sense to try to
    // evacuate more adjacent objects, thus amortizing the overhead. For sparsely
    // live heaps, scan costs easily dominate total assist costs, and can introduce
    // a lot of evacuation latency. This is why we only scan for the _nearest_ N
    // objects, regardless of whether they are eligible for evac or not. The scan
    // itself should also avoid touching non-marked objects below TAMS, because
    // their metadata (notably, klasses) may already be incorrect.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
      // Other code uses complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
                                      _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      HeapWord* cur = (HeapWord*)obj + obj->size() + ShenandoahBrooksPointer::word_size();

      size_t count = 0;
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        if (oopDesc::equals_raw(cur_oop, resolve_forwarded_not_null(cur_oop))) {
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size() + ShenandoahBrooksPointer::word_size();
      }
    }

    return res_oop;
  }
  return fwd;
}

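// General write barrier slow path: resolve the object, and evacuate it if it
// sits in the collection set, has not been forwarded yet, and evacuation is
// actually in progress. GC task threads are expected to already hold an
// evac-OOM scope; other threads must enter one here.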
oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
  assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        oopDesc::equals_raw(obj, fwd)) {
      Thread* t = Thread::current();
      if (t->is_GC_task_thread()) {
        return _heap->evacuate_object(obj, t);
      } else {
        ShenandoahEvacOOMScope oom_evac_scope;
        return _heap->evacuate_object(obj, t);
      }
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}

oop ShenandoahBarrierSet::write_barrier(oop obj) {
  if (ShenandoahWriteBarrier && _heap->has_forwarded_objects()) {
    return write_barrier_impl(obj);
  } else {
    return obj;
  }
}

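// Storeval barrier comes in two flavors: the enqueue variant write-barriers
// the value and pushes it onto the SATB queues; the read-barrier variant only
// resolves the value to its forwarded copy.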
oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
  if (ShenandoahStoreValEnqueueBarrier) {
    if (!CompressedOops::is_null(obj)) {
      obj = write_barrier(obj);
      enqueue(obj);
    }
  }
  if (ShenandoahStoreValReadBarrier) {
    obj = resolve_forwarded(obj);
  }
  return obj;
}

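// Keep-alive barrier: makes an object loaded through a weak/special reference
// visible to the concurrent marker by enqueueing it, so SATB marking does not
// miss it.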
void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
    enqueue(obj);
  }
}

void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
  if (!_satb_mark_queue_set.is_active()) return;

  // Filter marked objects before hitting the SATB queues. The same predicate would
  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
  // filtering here helps to avoid wasteful SATB queueing work to begin with.
  if (!_heap->requires_marking(obj)) return;

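  // Java threads have their own thread-local SATB queues; all other threads
  // funnel through the shared queue under the lock.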
  Thread* thr = Thread::current();
  if (thr->is_Java_thread()) {
    ShenandoahThreadLocalData::satb_mark_queue(thr).enqueue(obj);
  } else {
    MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
    _satb_mark_queue_set.shared_satb_queue()->enqueue(obj);
  }
}

void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}

void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}

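// Set up GC state for a newly attached Java thread: activate its SATB queue
// if marking is on, publish the current gc-state, and initialize its GCLAB.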
void ShenandoahBarrierSet::on_thread_attach(JavaThread* thread) {
  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
  assert(!ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "SATB queue should not be active");
  assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_empty(), "SATB queue should be empty");
  if (ShenandoahBarrierSet::satb_mark_queue_set().is_active()) {
    ShenandoahThreadLocalData::satb_mark_queue(thread).set_active(true);
  }
  ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
  ShenandoahThreadLocalData::initialize_gclab(thread);
}

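// Flush the remaining SATB entries and retire the GCLAB before the thread
// detaches, so no GC work is left dangling in thread-local state.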
void ShenandoahBarrierSet::on_thread_detach(JavaThread* thread) {
  ShenandoahThreadLocalData::satb_mark_queue(thread).flush();
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  if (gclab != NULL) {
    gclab->retire();
  }
}