1 /*
   2  * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  26 #include "gc_implementation/shenandoah/shenandoahAsserts.hpp"
  27 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
  28 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  29 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahHeuristics.hpp"
  31 #include "runtime/interfaceSupport.hpp"
  32 #include "utilities/macros.hpp"
  33 
  34 #ifdef COMPILER1
  35 #include "gc_implementation/shenandoah/shenandoahBarrierSetC1.hpp"
  36 #endif
  37 #ifdef COMPILER2
  38 #include "gc_implementation/shenandoah/shenandoahBarrierSetC2.hpp"
  39 #endif
  40 
  41 #if defined(TARGET_ARCH_aarch64)
  42 #include "shenandoahBarrierSetAssembler_aarch64.hpp"
  43 #elif defined(TARGET_ARCH_x86)
  44 #include "shenandoahBarrierSetAssembler_x86.hpp"
  45 #else
  46 #include "shenandoahBarrierSetAssembler_stub.hpp"
  47 #endif
  48 
  49 template <bool STOREVAL_EVAC_BARRIER>
  50 class ShenandoahUpdateRefsForOopClosure: public ExtendedOopClosure {
  51 private:
  52   ShenandoahHeap* _heap;
  53   ShenandoahBarrierSet* _bs;
  54 
  55   template <class T>
  56   inline void do_oop_work(T* p) {
  57     oop o;
  58     if (STOREVAL_EVAC_BARRIER) {
  59       o = _heap->evac_update_with_forwarded(p);
  60       if (!oopDesc::is_null(o)) {
  61         _bs->enqueue(o);
  62       }
  63     } else {
  64       _heap->maybe_update_with_forwarded(p);
  65     }
  66   }
  67 public:
  68   ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()), _bs(ShenandoahBarrierSet::barrier_set()) {
  69     assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
  70   }
  71 
  72   virtual void do_oop(oop* p)       { do_oop_work(p); }
  73   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  74 };
  75 
// Wires up the heap reference and the per-tier barrier support objects.
// The C1/C2 support objects are only instantiated when the respective
// compiler is built in; otherwise the slot stays NULL.
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(),
  _heap(heap),
  _bsasm(new ShenandoahBarrierSetAssembler()),
  _bsc1(COMPILER1_PRESENT(new ShenandoahBarrierSetC1()) NOT_COMPILER1(NULL)),
  _bsc2(COMPILER2_PRESENT(new ShenandoahBarrierSetC2()) NOT_COMPILER2(NULL))
{
  // Tag this BarrierSet so BarrierSet::is_a()/kind() dispatch recognizes it.
  _kind = BarrierSet::ShenandoahBarrierSet;
}
  85 
// Accessor for the assembler-level barrier support object.
ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::bsasm() const {
  return _bsasm;
}
  89 
// Accessor for the C1 barrier support object (NULL when C1 is not built in).
ShenandoahBarrierSetC1* ShenandoahBarrierSet::bsc1() const {
  return _bsc1;
}
  93 
// Accessor for the C2 barrier support object (NULL when C2 is not built in).
ShenandoahBarrierSetC2* ShenandoahBarrierSet::bsc2() const {
  return _bsc2;
}
  97 
// Prints a terse identification of this barrier set (used by heap printing).
void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}
 101 
// Type query: this barrier set answers only to its own BarrierSet::Name.
bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::ShenandoahBarrierSet;
}
 105 
// Capability query: the optimized bulk primitive-array read path is allowed.
bool ShenandoahBarrierSet::has_read_prim_array_opt() {
  return true;
}
 109 
// Capability query: no per-access barrier on primitive reads.
bool ShenandoahBarrierSet::has_read_prim_barrier() {
  return false;
}
 113 
// Capability query: the optimized bulk reference-array read path is allowed.
bool ShenandoahBarrierSet::has_read_ref_array_opt() {
  return true;
}
 117 
// Capability query: no per-access reference read barrier at this layer.
bool ShenandoahBarrierSet::has_read_ref_barrier() {
  return false;
}
 121 
// Capability query: the optimized region read path is allowed.
bool ShenandoahBarrierSet::has_read_region_opt() {
  return true;
}
 125 
// Capability query: the optimized bulk primitive-array write path is allowed.
bool ShenandoahBarrierSet::has_write_prim_array_opt() {
  return true;
}
 129 
// Capability query: no per-access barrier on primitive writes.
bool ShenandoahBarrierSet::has_write_prim_barrier() {
  return false;
}
 133 
// Capability query: the optimized bulk reference-array write path is allowed
// (see write_ref_array / write_ref_array_pre below).
bool ShenandoahBarrierSet::has_write_ref_array_opt() {
  return true;
}
 137 
// Capability query: reference writes do take a barrier (write_ref_field_work).
bool ShenandoahBarrierSet::has_write_ref_barrier() {
  return true;
}
 141 
// Capability query: reference writes take a pre-barrier
// (SATB, see write_ref_field_pre_work).
bool ShenandoahBarrierSet::has_write_ref_pre_barrier() {
  return true;
}
 145 
// Capability query: the optimized region write path is allowed
// (see write_region_work below, used by object cloning).
bool ShenandoahBarrierSet::has_write_region_opt() {
  return true;
}
 149 
// Alignment query: any HeapWord address is acceptable to this barrier set.
bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}
 153 
// Primitive reads never need a barrier, regardless of location or size.
bool ShenandoahBarrierSet::read_prim_needs_barrier(HeapWord* hw, size_t s) {
  return false;
}
 157 
// Hook for reference-field reads; intentionally a no-op for Shenandoah
// (matches has_read_ref_barrier() returning false above).
void ShenandoahBarrierSet::read_ref_field(void* v) {
  // Nothing to do.
}
 162 
 163 template <class T, bool STOREVAL_EVAC_BARRIER>
 164 void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
 165   assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
 166   ShenandoahUpdateRefsForOopClosure<STOREVAL_EVAC_BARRIER> cl;
 167   T* dst = (T*) start;
 168   for (size_t i = 0; i < count; i++) {
 169     cl.do_oop(dst++);
 170   }
 171 }
 172 
 173 void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
 174   assert(UseShenandoahGC, "should be enabled");
 175   if (!ShenandoahCloneBarrier) return;
 176   if (!need_update_refs_barrier()) return;
 177 
 178   if (_heap->is_concurrent_traversal_in_progress()) {
 179     ShenandoahEvacOOMScope oom_evac_scope;
 180     if (UseCompressedOops) {
 181       write_ref_array_loop<narrowOop, /* evac = */ true>(start, count);
 182     } else {
 183       write_ref_array_loop<oop,       /* evac = */ true>(start, count);
 184     }
 185   } else {
 186     if (UseCompressedOops) {
 187       write_ref_array_loop<narrowOop, /* evac = */ false>(start, count);
 188     } else {
 189       write_ref_array_loop<oop,       /* evac = */ false>(start, count);
 190     }
 191   }
 192 }
 193 
 194 template <class T>
 195 void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
 196   assert (UseShenandoahGC && ShenandoahSATBBarrier, "Should be enabled");
 197 
 198   shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
 199 
 200   if (! JavaThread::satb_mark_queue_set().is_active()) return;
 201   T* elem_ptr = dst;
 202   for (size_t i = 0; i < count; i++, elem_ptr++) {
 203     T heap_oop = oopDesc::load_heap_oop(elem_ptr);
 204     if (!oopDesc::is_null(heap_oop)) {
 205       enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
 206     }
 207   }
 208 }
 209 
 210 void ShenandoahBarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
 211   if (! dest_uninitialized && ShenandoahSATBBarrier) {
 212     write_ref_array_pre_work(dst, (size_t)count);
 213   }
 214 }
 215 
 216 void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
 217   if (! dest_uninitialized && ShenandoahSATBBarrier) {
 218     write_ref_array_pre_work(dst, (size_t)count);
 219   }
 220 }
 221 
// Combined pre-write barrier for a single reference-field store:
//  1) canonicalize the new value through the load-reference barrier,
//  2) apply the storeval barrier (traversal-GC SATB enqueue of the new value),
//  3) SATB-enqueue the previous field value so concurrent marking sees it.
template <class T>
inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop newVal) {
  newVal = load_reference_barrier(newVal);
  storeval_barrier(newVal);
  if (ShenandoahSATBBarrier) {
    // Read the about-to-be-overwritten value and record it for SATB.
    T heap_oop = oopDesc::load_heap_oop(field);
    shenandoah_assert_not_in_cset_loc_except(field, ShenandoahHeap::heap()->cancelled_gc());
    if (!oopDesc::is_null(heap_oop)) {
      ShenandoahBarrierSet::barrier_set()->enqueue(oopDesc::decode_heap_oop(heap_oop));
    }
  }
}
 234 
// These are the more general virtual versions.
// Virtual pre-write barrier for an uncompressed oop field.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 239 
// Virtual pre-write barrier for a compressed (narrowOop) field.
void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}
 243 
// Post-write hook for a single reference store. Shenandoah needs no
// post-barrier work here; in debug builds this verifies that neither the
// store location nor the stored value violates collection-set/forwarding
// invariants (modulo cancelled GC and concurrent-mark phases).
void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
}
 249 
// Barrier applied to a freshly written region, i.e. for cloning an object
// (see jvm.cpp) after the clone has been made.
void ShenandoahBarrierSet::write_region_work(MemRegion mr) {
  assert(UseShenandoahGC, "should be enabled");
  if (!ShenandoahCloneBarrier) return;
  if (! need_update_refs_barrier()) return;

  // This is called for cloning an object (see jvm.cpp) after the clone
  // has been made. We are not interested in any 'previous value' because
  // it would be NULL in any case. But we *are* interested in any oop*
  // that potentially need to be updated.

  // The region starts at the clone's header, so it can be viewed as an oop.
  oop obj = oop(mr.start());
  shenandoah_assert_correct(NULL, obj);
  if (_heap->is_concurrent_traversal_in_progress()) {
    // Traversal flavor may evacuate while updating; guard against evac OOM.
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahUpdateRefsForOopClosure</* evac = */ true> cl;
    obj->oop_iterate(&cl);
  } else {
    ShenandoahUpdateRefsForOopClosure</* evac = */ false> cl;
    obj->oop_iterate(&cl);
  }
}
 271 
 272 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
 273   assert(obj != NULL, "");
 274   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
 275     return load_reference_barrier_impl(obj);
 276   } else {
 277     return obj;
 278   }
 279 }
 280 
 281 oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
 282   if (obj != NULL) {
 283     return load_reference_barrier_not_null(obj);
 284   } else {
 285     return obj;
 286   }
 287 }
 288 
 289 
// Mutator slow path of the load-reference barrier: called while
// evacuation/traversal is in progress for an object in the collection set.
// Returns the to-space copy, evacuating the object here if nobody has yet.
oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress");
  shenandoah_assert_in_cset(NULL, obj);

  oop fwd = resolve_forwarded_not_null(obj);
  if (obj == fwd) {
    // Not forwarded yet: this mutator has to evacuate the object itself.
    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and paid the price of getting through runtime call adapters
    // and acquiring oom-scope, it makes sense to try and evacuate more adjacent objects,
    // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
    // total assist costs, and can introduce a lot of evacuation latency. This is why we
    // only scan for _nearest_ N objects, regardless if they are eligible for evac or not.
    // The scan itself should also avoid touching the non-marked objects below TAMS, because
    // their metadata (notably, klasses) may be incorrect already.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
      // Other code uses complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
                                      _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      // Start the assist scan immediately after the object just evacuated.
      HeapWord* cur = (HeapWord*)obj + obj->size();

      size_t count = 0;
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        if (cur_oop == resolve_forwarded_not_null(cur_oop)) {
          // Neighbor not yet forwarded: assist by evacuating it too.
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size();
      }
    }

    return res_oop;
  }
  // Someone else already evacuated it: return the existing to-space copy.
  return fwd;
}
 336 
 337 oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
 338   assert(ShenandoahLoadRefBarrier, "should be enabled");
 339   if (!oopDesc::is_null(obj)) {
 340     bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
 341     oop fwd = resolve_forwarded_not_null(obj);
 342     if (evac_in_progress &&
 343         _heap->in_collection_set(obj) &&
 344         obj == fwd) {
 345       Thread *t = Thread::current();
 346       if (t->is_GC_task_thread()) {
 347         return _heap->evacuate_object(obj, t);
 348       } else {
 349         ShenandoahEvacOOMScope oom_evac_scope;
 350         return _heap->evacuate_object(obj, t);
 351       }
 352     } else {
 353       return fwd;
 354     }
 355   } else {
 356     return obj;
 357   }
 358 }
 359 
 360 void ShenandoahBarrierSet::storeval_barrier(oop obj) {
 361   if (ShenandoahStoreValEnqueueBarrier && !oopDesc::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
 362     enqueue(obj);
 363   }
 364 }
 365 
 366 void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
 367   if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
 368     enqueue(obj);
 369   }
 370 }
 371 
// Pushes obj onto the current thread's SATB queue, skipping objects that
// marking would ignore anyway.
void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());

  // Filter marked objects before hitting the SATB queues. The same predicate would
  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
  // filtering here helps to avoid wasteful SATB queueing work to begin with.
  if (!_heap->requires_marking(obj)) return;

  // Reuse the G1 SATB queue machinery for the actual enqueue.
  G1SATBCardTableModRefBS::enqueue(obj);
}
 382 
// CAS of an oop at `dest`, transparently handling compressed-oops encoding.
// Returns the value found in memory (decoded back to an oop), exactly like
// a raw cmpxchg would.
oop ShenandoahBarrierSet::atomic_compare_exchange_oop(oop exchange_value,
                                                      volatile HeapWord *dest,
                                                      oop compare_value) {
  if (UseCompressedOops) {
    // encode exchange and compare value from oop to T
    narrowOop val = oopDesc::encode_heap_oop(exchange_value);
    narrowOop cmp = oopDesc::encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return oopDesc::decode_heap_oop(old);
  } else {
    // Uncompressed: CAS the full pointer width directly.
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
 398 
// Heap oop CAS that tolerates from-space/to-space aliasing: a CAS failure
// caused only by memory holding a different copy of the logically same
// object as the compare value is retried with the witnessed value.
oop ShenandoahBarrierSet::oop_atomic_cmpxchg_in_heap(oop new_value, volatile HeapWord* dest, oop compare_value) {
  oop expected;
  bool success;
  do {
    expected = compare_value;
    compare_value = atomic_compare_exchange_oop(new_value, dest, expected);
    success = (compare_value == expected);
    // Retry while the failure looks spurious: both values resolve to the
    // same forwarded object.
  } while ((! success) && resolve_forwarded(compare_value) == resolve_forwarded(expected));
  // Canonicalize the witnessed value before handing it back to the caller.
  oop result = load_reference_barrier(compare_value);
  if (ShenandoahSATBBarrier && success && result != NULL) {
    // On success, the previous value was overwritten: SATB-enqueue it.
    enqueue(result);
  }
  if (new_value != NULL) {
    // Apply the storeval barrier to the value just stored.
    storeval_barrier(new_value);
  }
  return result;
}