1 /*
   2  * Copyright (c) 2013, 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/g1/g1BarrierSet.hpp"
  26 #include "gc/shenandoah/shenandoahAsserts.hpp"
  27 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  29 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  30 #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "runtime/interfaceSupport.inline.hpp"
  33 
// Closure that re-visits every reference slot of an object (or array span)
// after a bulk store such as clone/arraycopy, and restores the heap
// invariants required by the current GC mode.
//
// Template flags select barrier flavors at compile time:
//   UPDATE_MATRIX          - record (slot, referent) connectivity in the
//                            connection matrix
//   STOREVAL_WRITE_BARRIER - evacuate referents out of the collection set
//                            before fixing up the slot
//   ALWAYS_ENQUEUE         - enqueue every non-null referent for marking,
//                            not only ones evacuated by this call
template <bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER, bool ALWAYS_ENQUEUE>
class ShenandoahUpdateRefsForOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  template <class T>
  inline void do_oop_work(T* p) {
    oop o;
    if (STOREVAL_WRITE_BARRIER) {
      // Evacuate the referent if needed; 'evac' reports whether this call
      // actually performed the copy.
      bool evac;
      o = _heap->evac_update_with_forwarded(p, evac);
      if ((ALWAYS_ENQUEUE || evac) && !oopDesc::is_null(o)) {
        ShenandoahBarrierSet::enqueue(o);
      }
    } else {
      // Only fix up the slot if the referent is already forwarded.
      o = _heap->maybe_update_with_forwarded(p);
    }
    // Matrix maintenance happens after the slot holds the canonical oop.
    if (UPDATE_MATRIX && !oopDesc::is_null(o)) {
      _heap->connection_matrix()->set_connected(p, o);
    }
  }
public:
  ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()) {
    assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  }
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
  61 
// Construct the barrier set: install the Shenandoah assembler stubs and
// register the FakeRtti tag so BarrierSet::is_a() dispatch works, then
// cache the heap for the barrier fast paths.
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             BarrierSet::FakeRtti(BarrierSet::Shenandoah)),
  _heap(heap)
{
}
  68 
// Print a terse identification of this barrier set on the given stream.
void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}
  72 
// FakeRtti query: this barrier set only identifies as Shenandoah.
bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::Shenandoah;
}
  76 
// Shenandoah has no card table, so any address is considered aligned.
bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}
  80 
// Not used: Shenandoah does not maintain a covered card-marking region.
void ShenandoahBarrierSet::resize_covered_region(MemRegion mr) {
  Unimplemented();
}
  84 
  85 bool ShenandoahBarrierSet::need_update_refs_barrier() {
  86   if (UseShenandoahMatrix || _heap->is_concurrent_traversal_in_progress()) {
  87     return true;
  88   }
  89   if (_heap->shenandoahPolicy()->update_refs()) {
  90     return _heap->is_update_refs_in_progress();
  91   } else {
  92     return _heap->is_concurrent_mark_in_progress() && _heap->has_forwarded_objects();
  93   }
  94 }
  95 
// Never called: Shenandoah handles array stores in write_ref_array() below
// instead of the generic MemRegion-based hook.
void ShenandoahBarrierSet::write_ref_array_work(MemRegion r) {
  ShouldNotReachHere();
}
  99 
 100 template <class T, bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER, bool ALWAYS_ENQUEUE>
 101 void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
 102   assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
 103   ShenandoahUpdateRefsForOopClosure<UPDATE_MATRIX, STOREVAL_WRITE_BARRIER, ALWAYS_ENQUEUE> cl;
 104   ShenandoahEvacOOMScope oom_evac_scope;
 105   T* dst = (T*) start;
 106   for (size_t i = 0; i < count; i++) {
 107     cl.do_oop(dst++);
 108   }
 109 }
 110 
// Post-barrier for bulk array stores: re-visit the freshly written slots and
// dispatch to the statically specialized loop. The template flags mirror the
// current GC mode: <element type, matrix, storeval-wb, always-enqueue>.
void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(UseShenandoahGC, "should be enabled");
  if (!ShenandoahCloneBarrier) return;
  if (!need_update_refs_barrier()) return;

  if (UseShenandoahMatrix) {
    assert(! _heap->is_concurrent_traversal_in_progress(), "traversal GC should take another branch");
    if (_heap->is_concurrent_partial_in_progress()) {
      // Partial GC: maintain the matrix and evacuate referents on the fly.
      if (UseCompressedOops) {
        write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ true,  /* enqueue = */ false>(start, count);
      } else {
        write_ref_array_loop<oop,       /* matrix = */ true, /* wb = */ true,  /* enqueue = */ false>(start, count);
      }
    } else {
      // Matrix maintenance only; fix up already-forwarded referents.
      if (UseCompressedOops) {
        write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ false, /* enqueue = */ false>(start, count);
      } else {
        write_ref_array_loop<oop,       /* matrix = */ true, /* wb = */ false, /* enqueue = */ false>(start, count);
      }
    }
  } else if (_heap->is_concurrent_traversal_in_progress()) {
    // Traversal GC: evacuate and enqueue every non-null referent.
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop,   /* matrix = */ false, /* wb = */ true, /* enqueue = */ true>(start, count);
    } else {
      write_ref_array_loop<oop,         /* matrix = */ false, /* wb = */ true, /* enqueue = */ true>(start, count);
    }
  } else {
    // Plain update-refs: just fix up forwarded referents.
    if (UseCompressedOops) {
      write_ref_array_loop<narrowOop,   /* matrix = */ false, /* wb = */ false, /* enqueue = */ false>(start, count);
    } else {
      write_ref_array_loop<oop,         /* matrix = */ false, /* wb = */ false, /* enqueue = */ false>(start, count);
    }
  }
}
 145 
 146 void ShenandoahBarrierSet::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
 147   ShenandoahBarrierSet *bs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
 148   bs->write_ref_array_pre(dst, length, false);
 149 }
 150 
 151 void ShenandoahBarrierSet::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
 152   ShenandoahBarrierSet *bs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
 153   bs->write_ref_array_pre(dst, length, false);
 154 }
 155 
 156 void ShenandoahBarrierSet::write_ref_array_post_entry(HeapWord* dst, size_t length) {
 157   ShenandoahBarrierSet *bs = barrier_set_cast<ShenandoahBarrierSet>(BarrierSet::barrier_set());
 158   bs->ShenandoahBarrierSet::write_ref_array(dst, length);
 159 }
 160 
 161 
 162 template <class T>
 163 void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, int count) {
 164   shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_concgc());
 165   if (ShenandoahSATBBarrier ||
 166       (ShenandoahConditionalSATBBarrier && _heap->is_concurrent_mark_in_progress())) {
 167     T* elem_ptr = dst;
 168     for (int i = 0; i < count; i++, elem_ptr++) {
 169       T heap_oop = oopDesc::load_heap_oop(elem_ptr);
 170       if (!oopDesc::is_null(heap_oop)) {
 171         enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
 172       }
 173     }
 174   }
 175 }
 176 
 177 void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
 178   if (! dest_uninitialized) {
 179     write_ref_array_pre_work(dst, count);
 180   }
 181 }
 182 
 183 void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
 184   if (! dest_uninitialized) {
 185     write_ref_array_pre_work(dst, count);
 186   }
 187 }
 188 
// Field-store pre-barrier: SATB-enqueue the previous field value while
// concurrent marking runs, then (independently) record the new connection
// in the matrix when it is enabled.
template <class T>
inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_concgc());
  if (_heap->is_concurrent_mark_in_progress()) {
    // Snapshot-at-the-beginning: the old referent must stay reachable.
    T heap_oop = oopDesc::load_heap_oop(field);
    if (!oopDesc::is_null(heap_oop)) {
      enqueue(oopDesc::decode_heap_oop(heap_oop));
    }
  }
  if (UseShenandoahMatrix && ! oopDesc::is_null(new_val)) {
    ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
    matrix->set_connected(field, new_val);
  }
}
 203 
// These are the more general virtual versions.
// Virtual pre-barrier hook, uncompressed oops: delegate to the inline form.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 208 
// Virtual pre-barrier hook, compressed oops: delegate to the inline form.
void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 212 
// The untyped void* variant must never be reached; callers always know
// whether the slot holds an oop or a narrowOop.
void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}
 216 
// Field-store post-barrier: no work is required at runtime; in debug builds
// verify that the store location and the stored value uphold the to-space
// invariants (outside of cancelled-GC / non-marking exceptions).
void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_concgc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_concgc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_concgc() || !_heap->is_concurrent_mark_in_progress());
}
 222 
// Post-barrier for bulk object copies (clone). Iterates all reference fields
// of the freshly cloned object and applies the same mode-dependent update
// closure as write_ref_array().
void ShenandoahBarrierSet::write_region(MemRegion mr) {
  assert(UseShenandoahGC, "should be enabled");
  if (!ShenandoahCloneBarrier) return;
  if (! need_update_refs_barrier()) return;

  // This is called for cloning an object (see jvm.cpp) after the clone
  // has been made. We are not interested in any 'previous value' because
  // it would be NULL in any case. But we *are* interested in any oop*
  // that potentially need to be updated.

  ShenandoahEvacOOMScope oom_evac_scope;
  oop obj = oop(mr.start());
  assert(oopDesc::is_oop(obj), "must be an oop");
  // Closure template flags are <matrix, storeval-wb, always-enqueue>;
  // the dispatch mirrors write_ref_array().
  if (UseShenandoahMatrix) {
    assert(! _heap->is_concurrent_traversal_in_progress(), "traversal GC should take another branch");
    if (_heap->is_concurrent_partial_in_progress()) {
      ShenandoahUpdateRefsForOopClosure<true, true, false> cl;
      obj->oop_iterate(&cl);
    } else {
      ShenandoahUpdateRefsForOopClosure<true, false, false> cl;
      obj->oop_iterate(&cl);
    }
  } else {
    assert(! _heap->is_concurrent_partial_in_progress(), "partial GC needs matrix");
    if (_heap->is_concurrent_traversal_in_progress()) {
      ShenandoahUpdateRefsForOopClosure<false, true, true> cl;
      obj->oop_iterate(&cl);
    } else {
      ShenandoahUpdateRefsForOopClosure<false, false, false> cl;
      obj->oop_iterate(&cl);
    }
  }
}
 256 
 257 oop ShenandoahBarrierSet::read_barrier(oop src) {
 258   // Check for forwarded objects, because on Full GC path we might deal with
 259   // non-trivial fwdptrs that contain Full GC specific metadata. We could check
 260   // for is_full_gc_in_progress(), but this also covers the case of stable heap,
 261   // which provides a bit of performance improvement.
 262   if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
 263     return ShenandoahBarrierSet::resolve_forwarded(src);
 264   } else {
 265     return src;
 266   }
 267 }
 268 
 269 bool ShenandoahBarrierSet::obj_equals(oop obj1, oop obj2) {
 270   bool eq = oopDesc::unsafe_equals(obj1, obj2);
 271   if (! eq && ShenandoahAcmpBarrier) {
 272     OrderAccess::loadload();
 273     obj1 = resolve_forwarded(obj1);
 274     obj2 = resolve_forwarded(obj2);
 275     eq = oopDesc::unsafe_equals(obj1, obj2);
 276   }
 277   return eq;
 278 }
 279 
// Slow-path write-barrier entry called from compiled code (JRT leaf).
JRT_LEAF(oopDesc*, ShenandoahBarrierSet::write_barrier_JRT(oopDesc* src))
  oop result = ((ShenandoahBarrierSet*)BarrierSet::barrier_set())->write_barrier(src);
  return (oopDesc*) result;
JRT_END
 284 
// Slow-path write-barrier entry called from the interpreter (IRT leaf).
IRT_LEAF(oopDesc*, ShenandoahBarrierSet::write_barrier_IRT(oopDesc* src))
  oop result = ((ShenandoahBarrierSet*)BarrierSet::barrier_set())->write_barrier(src);
  return (oopDesc*) result;
IRT_END
 289 
// Core write barrier: return a to-space copy of 'obj', evacuating it on the
// spot when it sits in the collection set and has not been copied yet.
oop ShenandoahBarrierSet::write_barrier_impl(oop obj) {
  assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier), "should be enabled");
  if (!oopDesc::is_null(obj)) {
    // Evacuation may be performed during concurrent evac, partial or
    // traversal GC phases.
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        oopDesc::unsafe_equals(obj, fwd)) {
      // Object is in the cset and not yet forwarded: evacuate it here, under
      // the evac-OOM protocol so allocation failure degrades safely.
      ShenandoahEvacOOMScope oom_evac_scope;
      bool evac;
      oop copy = _heap->evacuate_object(obj, Thread::current(), evac);
      if (evac && _heap->is_concurrent_partial_in_progress()) {
        // Partial GC must see newly evacuated objects on the mark queue.
        enqueue(copy);
      }
      return copy;
    } else {
      // Already forwarded, or not in the cset: the resolved oop is canonical.
      return fwd;
    }
  } else {
    return obj;
  }
}
 312 
 313 oop ShenandoahBarrierSet::write_barrier(oop obj) {
 314   if (ShenandoahWriteBarrier) {
 315     return write_barrier_impl(obj);
 316   } else {
 317     return obj;
 318   }
 319 }
 320 
// Barrier applied to the *value* being stored into the heap. Which steps run
// is flag-driven; the order (write-barrier, then enqueue, then read-barrier)
// is deliberate.
oop ShenandoahBarrierSet::storeval_barrier(oop obj) {
  if (ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
    // Ensure we store a to-space reference, evacuating if necessary.
    obj = write_barrier(obj);
  }
  if (ShenandoahStoreValEnqueueBarrier && !oopDesc::is_null(obj)) {
    // Keep the stored value on the mark queue for concurrent marking.
    enqueue(obj);
  }
  if (ShenandoahStoreValReadBarrier) {
    // Weaker variant: only resolve through the forwarding pointer.
    obj = resolve_forwarded(obj);
  }
  return obj;
}
 333 
// Keep-alive barrier (e.g. for weak-reference accesses): make sure the
// referent is treated as live by the in-flight GC cycle.
void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
  if (ShenandoahKeepAliveBarrier) {
    if (_heap->is_concurrent_mark_in_progress()) {
      // Concurrent mark: push the referent onto the SATB queue.
      enqueue(obj);
    } else if (_heap->is_concurrent_partial_in_progress()) {
      // Partial GC: run the write barrier slow path, which evacuates and
      // enqueues as needed.
      write_barrier_impl(obj);
    }
  }
}
 343 
// Push 'obj' onto the SATB mark queue. Shenandoah reuses G1's SATB queue
// machinery for this. During traversal GC the enqueued oop must already be
// the to-space copy (asserted below).
void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
  G1BarrierSet::enqueue(obj);
}
 348 
#ifdef ASSERT
// Debug-only check: a "safe" oop must not point into the collection set,
// unless it is NULL or the concurrent GC has been cancelled.
void ShenandoahBarrierSet::verify_safe_oop(oop p) {
  shenandoah_assert_not_in_cset_except(NULL, p, (p == NULL) || ShenandoahHeap::heap()->cancelled_concgc());
}
#endif
 354 
// Called when a Java thread attaches to the VM: synchronize its thread-local
// barrier state with the global GC state.
void ShenandoahBarrierSet::on_thread_attach(JavaThread* thread) {
  assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
  assert(!thread->satb_mark_queue().is_active(), "SATB queue should not be active");
  assert(thread->satb_mark_queue().is_empty(), "SATB queue should be empty");
  // If concurrent marking activated the global SATB queue set, the newly
  // attached thread must participate in the pre-barrier as well.
  if (thread->satb_mark_queue_set().is_active()) {
    thread->satb_mark_queue().set_active(true);
  }
  // Mirror the global GC state into the thread-local copy used by barriers.
  thread->set_gc_state(JavaThread::gc_state_global());
}
 364 
// Called when a Java thread detaches from the VM: hand off its thread-local
// GC resources.
void ShenandoahBarrierSet::on_thread_detach(JavaThread* thread) {
  // Flush pending SATB entries so concurrent marking does not lose them.
  thread->satb_mark_queue().flush();
  // Retire the thread-local GC allocation buffer, making it heap-parsable.
  if (UseTLAB && thread->gclab().is_initialized()) {
    thread->gclab().make_parsable(true);
  }
}