1 /*
   2  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shenandoah/shenandoahAsserts.hpp"
  26 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  27 #include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp"
  30 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  31 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
  32 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  33 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  34 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  35 #include "memory/iterator.inline.hpp"
  36 #include "runtime/interfaceSupport.inline.hpp"
  37 #ifdef COMPILER1
  38 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  39 #endif
  40 #ifdef COMPILER2
  41 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  42 #endif
  43 
  44 class ShenandoahBarrierSetC1;
  45 class ShenandoahBarrierSetC2;
  46 
  47 static BarrierSetNMethod* make_barrier_set_nmethod(ShenandoahHeap* heap) {
  48   // NMethod barriers are only used when concurrent nmethod unloading is enabled
  49   if (!ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
  50     return NULL;
  51   }
  52   return new ShenandoahBarrierSetNMethod(heap);
  53 }
  54 
// Construct the Shenandoah barrier set: wires up the platform assembler,
// optional C1/C2 compiler support (compiled out when the respective compiler
// is absent), the optional nmethod entry barrier, and the RTTI tag that
// BarrierSet::is_a() dispatches on. Also sets up the SATB mark queue
// machinery; the buffer allocator must be initialized before the queue set
// that holds a pointer to it (member order above guarantees this).
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             make_barrier_set_nmethod(heap),
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
  _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
{
}
  66 
  67 ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  68   BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  69   return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
  70 }
  71 
// Print a short identification of this barrier set into GC/heap diagnostics.
void ShenandoahBarrierSet::print_on(outputStream* st) const {
  st->print("ShenandoahBarrierSet");
}
  75 
  76 bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  77   return bsn == BarrierSet::ShenandoahBarrierSet;
  78 }
  79 
// Shenandoah imposes no barrier-specific alignment constraint on heap words,
// so every address is considered aligned.
bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  return true;
}
  83 
  84 bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
  85   if (!ShenandoahLoadRefBarrier) return false;
  86   // Only needed for references
  87   return is_reference_type(type);
  88 }
  89 
  90 bool ShenandoahBarrierSet::use_load_reference_barrier_native(DecoratorSet decorators, BasicType type) {
  91   assert(need_load_reference_barrier(decorators, type), "Should be subset of LRB");
  92   assert(is_reference_type(type), "Why we here?");
  93   // Native load reference barrier is only needed for concurrent root processing
  94   if (!ShenandoahConcurrentRoots::can_do_concurrent_roots()) {
  95     return false;
  96   }
  97 
  98   return (decorators & IN_NATIVE) != 0;
  99 }
 100 
 101 bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators,BasicType type) {
 102   if (!ShenandoahKeepAliveBarrier) return false;
 103   // Only needed for references
 104   if (!is_reference_type(type)) return false;
 105 
 106   bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
 107   bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 108   bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
 109   bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
 110   return (on_weak_ref || unknown) && (keep_alive || is_traversal_mode);
 111 }
 112 
 113 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
 114   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
 115     return load_reference_barrier_impl(obj);
 116   } else {
 117     return obj;
 118   }
 119 }
 120 
 121 oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
 122   if (obj != NULL) {
 123     return load_reference_barrier_not_null(obj);
 124   } else {
 125     return obj;
 126   }
 127 }
 128 
// Mutator slow-path entry for the load-reference barrier, uncompressed-oop
// load address. Delegates to the shared template worker.
oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, oop* load_addr) {
  return load_reference_barrier_mutator_work(obj, load_addr);
}
 132 
// Mutator slow-path entry for the load-reference barrier, compressed-oop
// load address. Delegates to the shared template worker.
oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, narrowOop* load_addr) {
  return load_reference_barrier_mutator_work(obj, load_addr);
}
 136 
// Shared worker for the mutator load-reference barrier. The caller guarantees
// obj is in the collection set (asserted below). If obj has not been
// forwarded yet, this thread evacuates it (and opportunistically a few
// neighbors), then heals the load address so subsequent loads see the
// canonical copy. Returns the canonical (forwarded) oop.
template <class T>
oop ShenandoahBarrierSet::load_reference_barrier_mutator_work(oop obj, T* load_addr) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  shenandoah_assert_in_cset(load_addr, obj);

  oop fwd = resolve_forwarded_not_null(obj);
  if (obj == fwd) {
    // Not forwarded yet: we must be in an evacuation (or traversal) phase,
    // so this mutator performs the evacuation itself.
    assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL),
           "evac should be in progress");

    // Scope protects against evac-OOM deadlocks while this thread allocates
    // copies.
    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and paid the price of getting through runtime call adapters
    // and acquiring oom-scope, it makes sense to try and evacuate more adjacent objects,
    // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
    // total assist costs, and can introduce a lot of evacuation latency. This is why we
    // only scan for _nearest_ N objects, regardless if they are eligible for evac or not.
    // The scan itself should also avoid touching the non-marked objects below TAMS, because
    // their metadata (notably, klasses) may be incorrect already.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
      // Other code uses complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
                                      _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      // Start scanning right past obj; stop at region top, an unmarked
      // object, or after assisting with `max` objects.
      HeapWord* cur = (HeapWord*)obj + obj->size();

      size_t count = 0;
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        // Only evacuate objects that have not been forwarded already.
        if (cur_oop == resolve_forwarded_not_null(cur_oop)) {
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size();
      }
    }

    fwd = res_oop;
  }

  if (load_addr != NULL && fwd != obj) {
    // Since we are here and we know the load address, update the reference.
    // CAS, because a concurrent thread may have healed (or re-written) the
    // slot already; only replace the stale value we actually loaded.
    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
  }

  return fwd;
}
 192 
// Core load-reference barrier: returns the canonical copy of obj. If obj sits
// in the collection set, has not been forwarded yet, and evacuation (or
// traversal) is in progress, this thread evacuates it; otherwise it simply
// returns the already-resolved forwardee (or obj itself).
oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        obj == fwd) {
      Thread *t = Thread::current();
      if (t->is_GC_task_thread()) {
        // GC workers already operate inside an evac-OOM scope; entering
        // another one here is unnecessary.
        return _heap->evacuate_object(obj, t);
      } else {
        // Mutator (or other non-GC) threads need the scope to coordinate
        // with evacuation out-of-memory handling.
        ShenandoahEvacOOMScope oom_evac_scope;
        return _heap->evacuate_object(obj, t);
      }
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}
 215 
// Thread lifecycle hook: allocate Shenandoah's per-thread data (SATB queue,
// gclab, gc-state) when a thread object is created.
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}
 220 
// Thread lifecycle hook: release the per-thread data allocated in
// on_thread_create() when the thread object is destroyed.
void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}
 225 
// Thread lifecycle hook: called when a thread attaches to the VM. Activates
// the thread's SATB queue to match the global queue-set state, then (for Java
// threads) snapshots the current GC state and sets up the GC local allocation
// buffer.
void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
  // Attaching at a safepoint could race with a concurrently-started marking
  // cycle flipping the queue-set active state.
  assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(),
         "We should not be at a safepoint");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  assert(!queue.is_active(), "SATB queue should not be active");
  assert( queue.is_empty(),  "SATB queue should be empty");
  queue.set_active(_satb_mark_queue_set.is_active());
  if (thread->is_Java_thread()) {
    ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
}
 238 
// Thread lifecycle hook: called when a thread detaches from the VM. Flushes
// any pending SATB entries so marking does not lose them, and retires the
// thread's gclab so its unused space is accounted for.
void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  queue.flush();
  if (thread->is_Java_thread()) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    if (gclab != NULL) {
      gclab->retire();
    }
  }
}
 249 
// Native (off-heap root) load-reference barrier, uncompressed-oop address.
// Delegates to the shared template implementation.
oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, oop* load_addr) {
  return load_reference_barrier_native_impl(obj, load_addr);
}
 253 
// Narrow-oop variant is intentionally unimplemented: native roots are not
// stored compressed, so this overload should never be reached.
oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, narrowOop* load_addr) {
  // Assumption: narrow oop version should not be used anywhere.
  ShouldNotReachHere();
  return NULL;
}
 259 
// Load-reference barrier for native (off-heap) roots. While concurrent root
// processing is in progress, an unmarked object referenced from a native root
// is logically dead: Java threads observe it as NULL (the root is being
// cleared), while non-Java threads still see the object itself. Otherwise,
// resolves the canonical copy and heals the root slot if it was stale.
template <class T>
oop ShenandoahBarrierSet::load_reference_barrier_native_impl(oop obj, T* load_addr) {
  if (CompressedOops::is_null(obj)) {
    return NULL;
  }

  ShenandoahMarkingContext* const marking_context = _heap->marking_context();
  if (_heap->is_concurrent_root_in_progress() && !marking_context->is_marked(obj)) {
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      // Java threads must not resurrect an unreachable object: report NULL.
      return NULL;
    } else {
      // Non-Java threads (e.g. GC/VM threads doing root processing) may
      // still need the raw reference.
      return obj;
    }
  }

  oop fwd = load_reference_barrier_not_null(obj);
  if (load_addr != NULL && fwd != obj) {
    // Since we are here and we know the load address, update the reference.
    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
  }

  return fwd;
}
 284 
 285 void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
 286   if (_heap->has_forwarded_objects()) {
 287     clone_barrier(src);
 288   }
 289 }
 290