/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#ifdef COMPILER1
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif
#ifdef COMPILER2
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif

class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;

ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             NULL /* barrier_set_nmethod */,
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
  _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
{
}

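// Fetch the process-wide barrier set assembler and downcast it to the Shenandoah-specific type.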
ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
}

void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}

bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  return bsn == BarrierSet::ShenandoahBarrierSet;
}

bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}

bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahLoadRefBarrier) return false;
  // Only needed for references
  if (!is_reference_type(type)) return false;
  return true;
}

bool ShenandoahBarrierSet::use_native_load_reference_barrier(DecoratorSet decorators, BasicType type) {
  assert(need_load_reference_barrier(decorators, type), "Why ask?");
  assert(is_reference_type(type), "Why we here?");
  // Native load reference barrier is only needed for concurrent root processing
  if (!ShenandoahConcurrentRoots::can_do_concurrent_roots()) {
    return false;
  }

  return (decorators & IN_NATIVE) != 0;
}
bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
  if (!ShenandoahKeepAliveBarrier) return false;
  // Only needed for references
  if (!is_reference_type(type)) return false;

  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
  bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
  return (on_weak_ref || unknown) && (keep_alive || is_traversal_mode);
}

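// SATB pre-write barrier: while concurrent marking is in progress, enqueue the
// previous value of the field, so that a reference overwritten by the mutator
// is still visited by the marking wavefront.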
template <class T>
inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
  shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
  if (_heap->is_concurrent_mark_in_progress()) {
    T heap_oop = RawAccess<>::oop_load(field);
    if (!CompressedOops::is_null(heap_oop)) {
      enqueue(CompressedOops::decode(heap_oop));
    }
  }
}

// These are the more general virtual versions.
void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) {
  inline_write_ref_field_pre(field, new_val);
}

void ShenandoahBarrierSet::write_ref_field_pre_work(void* field, oop new_val) {
  guarantee(false, "Not needed");
}

void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
  shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
  shenandoah_assert_not_forwarded_except  (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
  shenandoah_assert_not_in_cset_except    (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
}

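// Load reference barrier: while forwarded objects may exist on the heap, make sure
// loads return the canonical (to-space) copy of the object.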
oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
  if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
    return load_reference_barrier_impl(obj);
  } else {
    return obj;
  }
}

oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
  if (obj != NULL) {
    return load_reference_barrier_not_null(obj);
  } else {
    return obj;
  }
}

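// Mutator variants of the load reference barrier: evacuate the object if needed,
// and opportunistically assist with evacuating its neighbors (see below).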
oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, oop* load_addr) {
  return load_reference_barrier_mutator_work(obj, load_addr);
}

oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, narrowOop* load_addr) {
  return load_reference_barrier_mutator_work(obj, load_addr);
}

template <class T>
oop ShenandoahBarrierSet::load_reference_barrier_mutator_work(oop obj, T* load_addr) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  shenandoah_assert_in_cset(load_addr, obj);

  oop fwd = resolve_forwarded_not_null(obj);
  if (obj == fwd) {
    assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL),
           "evac should be in progress");

    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and paid the price of getting through runtime call adapters
    // and acquiring the oom-scope, it makes sense to try and evacuate more adjacent objects,
    // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
    // total assist costs, and can introduce a lot of evacuation latency. This is why we
    // only scan the _nearest_ N objects, regardless of whether they are eligible for evac or not.
    // The scan itself should also avoid touching non-marked objects below TAMS, because
    // their metadata (notably, klasses) may already be incorrect.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses the incomplete marking context, because it coalesces evac with mark.
      // Other code uses the complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
                                      _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      HeapWord* cur = (HeapWord*)obj + obj->size();

      size_t count = 0;
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        if (cur_oop == resolve_forwarded_not_null(cur_oop)) {
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size();
      }
    }

    fwd = res_oop;
  }

  if (load_addr != NULL && fwd != obj) {
    // Since we are here and we know the load address, update the reference.
    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
  }

  return fwd;
}

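// Slow path of the load reference barrier: if evacuation is in progress and the
// object is an un-forwarded member of the collection set, evacuate it; otherwise
// return its current forwardee.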
oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        obj == fwd) {
      Thread *t = Thread::current();
      if (t->is_GC_task_thread()) {
        return _heap->evacuate_object(obj, t);
      } else {
        ShenandoahEvacOOMScope oom_evac_scope;
        return _heap->evacuate_object(obj, t);
      }
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}

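// Store-value barrier: while concurrent traversal is in progress, enqueue the
// value being stored, so the traversal GC visits it.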
void ShenandoahBarrierSet::storeval_barrier(oop obj) {
  if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj) && _heap->is_concurrent_traversal_in_progress()) {
    enqueue(obj);
  }
}

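// Keep-alive barrier: during concurrent marking, enqueue the object (typically a
// referent read through a weak reference) so SATB marking keeps it alive.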
void ShenandoahBarrierSet::keep_alive_barrier(oop obj) {
  if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) {
    enqueue(obj);
  }
}

void ShenandoahBarrierSet::enqueue(oop obj) {
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_traversal_in_progress());
  assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");

  // Filter marked objects before hitting the SATB queues. The same predicate would
  // be used by SATBMQ::filter to eliminate already marked objects downstream, but
  // filtering here helps to avoid wasteful SATB queueing work to begin with.
  if (!_heap->requires_marking<false>(obj)) return;

  ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
}

void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}

void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}

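// When a thread attaches, its SATB queue activation must match the global SATB
// state; Java threads additionally pick up the current GC state and a GCLAB.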
void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
  assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(),
         "We should not be at a safepoint");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  assert(!queue.is_active(), "SATB queue should not be active");
  assert( queue.is_empty(),  "SATB queue should be empty");
  queue.set_active(_satb_mark_queue_set.is_active());
  if (thread->is_Java_thread()) {
    ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
}

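// When a thread detaches, flush its remaining SATB entries and retire its GCLAB.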
void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  queue.flush();
  if (thread->is_Java_thread()) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    if (gclab != NULL) {
      gclab->retire();
    }
  }
}

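// Native variant of the load reference barrier, for IN_NATIVE (off-heap root)
// accesses when concurrent root processing is enabled.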
oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, oop* load_addr) {
  return load_reference_barrier_native_impl(obj, load_addr);
}

oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, narrowOop* load_addr) {
  // Assumption: narrow oop version should not be used anywhere.
  ShouldNotReachHere();
  return NULL;
}

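// If evacuation is in progress and the object is not marked, it is unreachable:
// hide it from Java threads by returning NULL, while non-Java (GC) threads may
// still see the object itself. Otherwise, apply the regular load reference
// barrier and update the load address with the forwardee.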
template <class T>
oop ShenandoahBarrierSet::load_reference_barrier_native_impl(oop obj, T* load_addr) {
  if (CompressedOops::is_null(obj)) {
    return NULL;
  }

  ShenandoahMarkingContext* const marking_context = _heap->marking_context();
  if (_heap->is_evacuation_in_progress() && !marking_context->is_marked(obj)) {
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      return NULL;
    } else {
      return obj;
    }
  }

  oop fwd = load_reference_barrier_not_null(obj);
  if (load_addr != NULL && fwd != obj) {
    // Since we are here and we know the load address, update the reference.
    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
  }

  return fwd;
}

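// Runtime entry for the clone barrier; only needed while forwarded objects exist.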
void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
  if (_heap->has_forwarded_objects()) {
    clone_barrier(src);
  }
}
 336