1 /*
   2  * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shenandoah/shenandoahAsserts.hpp"
  27 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  28 #include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp"
  31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  32 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
  33 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  34 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  35 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  36 #include "memory/iterator.inline.hpp"
  37 #include "runtime/interfaceSupport.inline.hpp"
  38 #ifdef COMPILER1
  39 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  40 #endif
  41 #ifdef COMPILER2
  42 #include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
  43 #endif
  44 
  45 class ShenandoahBarrierSetC1;
  46 class ShenandoahBarrierSetC2;
  47 
  48 static BarrierSetNMethod* make_barrier_set_nmethod(ShenandoahHeap* heap) {
  49   // NMethod barriers are only used when concurrent nmethod unloading is enabled
  50   if (!ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
  51     return NULL;
  52   }
  53   return new ShenandoahBarrierSetNMethod(heap);
  54 }
  55 
// Wire up the Shenandoah barrier set: platform assembler, C1/C2 compiler
// support, optional nmethod entry barrier, and the SATB queue machinery.
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
  BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
             make_barrier_set_c1<ShenandoahBarrierSetC1>(),
             make_barrier_set_c2<ShenandoahBarrierSetC2>(),
             make_barrier_set_nmethod(heap),
             BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
  _heap(heap),
  _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),
  _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator)
{
}
  67 
  68 ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::assembler() {
  69   BarrierSetAssembler* const bsa = BarrierSet::barrier_set()->barrier_set_assembler();
  70   return reinterpret_cast<ShenandoahBarrierSetAssembler*>(bsa);
  71 }
  72 
  73 void ShenandoahBarrierSet::print_on(outputStream* st) const {
  74   st->print("ShenandoahBarrierSet");
  75 }
  76 
  77 bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) {
  78   return bsn == BarrierSet::ShenandoahBarrierSet;
  79 }
  80 
  81 bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) {
  82   return true;
  83 }
  84 
  85 bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
  86   if (!ShenandoahLoadRefBarrier) return false;
  87   // Only needed for references
  88   return is_reference_type(type);
  89 }
  90 
  91 bool ShenandoahBarrierSet::use_load_reference_barrier_native(DecoratorSet decorators, BasicType type) {
  92   assert(need_load_reference_barrier(decorators, type), "Should be subset of LRB");
  93   assert(is_reference_type(type), "Why we here?");
  94   // Native load reference barrier is only needed for concurrent root processing
  95   if (!ShenandoahConcurrentRoots::can_do_concurrent_roots()) {
  96     return false;
  97   }
  98 
  99   return (decorators & IN_NATIVE) != 0;
 100 }
 101 
 102 bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators,BasicType type) {
 103   if (!ShenandoahKeepAliveBarrier) return false;
 104   // Only needed for references
 105   if (!is_reference_type(type)) return false;
 106 
 107   bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
 108   bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
 109   bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
 110   bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
 111   return (on_weak_ref || unknown) && (keep_alive || is_traversal_mode);
 112 }
 113 
 114 oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) {
 115   if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) {
 116     return load_reference_barrier_impl(obj);
 117   } else {
 118     return obj;
 119   }
 120 }
 121 
 122 oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
 123   if (obj != NULL) {
 124     return load_reference_barrier_not_null(obj);
 125   } else {
 126     return obj;
 127   }
 128 }
 129 
 130 oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, oop* load_addr) {
 131   return load_reference_barrier_mutator_work(obj, load_addr);
 132 }
 133 
 134 oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, narrowOop* load_addr) {
 135   return load_reference_barrier_mutator_work(obj, load_addr);
 136 }
 137 
// Mutator slow path of the load-reference-barrier: the loaded object is in
// the collection set and may need evacuation by the mutator itself. Returns
// the forwardee and, when the load address is known, heals the slot so
// subsequent loads observe the to-space copy.
template <class T>
oop ShenandoahBarrierSet::load_reference_barrier_mutator_work(oop obj, T* load_addr) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  shenandoah_assert_in_cset(load_addr, obj);

  oop fwd = resolve_forwarded_not_null_mutator(obj);
  if (obj == fwd) {
    // Not forwarded yet: this mutator evacuates the object itself.
    assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL),
           "evac should be in progress");

    ShenandoahEvacOOMScope oom_evac_scope;

    Thread* thread = Thread::current();
    oop res_oop = _heap->evacuate_object(obj, thread);

    // Since we are already here and paid the price of getting through runtime call adapters
    // and acquiring oom-scope, it makes sense to try and evacuate more adjacent objects,
    // thus amortizing the overhead. For sparsely live heaps, scan costs easily dominate
    // total assist costs, and can introduce a lot of evacuation latency. This is why we
    // only scan for _nearest_ N objects, regardless if they are eligible for evac or not.
    // The scan itself should also avoid touching the non-marked objects below TAMS, because
    // their metadata (notably, klasses) may be incorrect already.

    size_t max = ShenandoahEvacAssist;
    if (max > 0) {
      // Traversal is special: it uses incomplete marking context, because it coalesces evac with mark.
      // Other code uses complete marking context, because evac happens after the mark.
      ShenandoahMarkingContext* ctx = _heap->is_concurrent_traversal_in_progress() ?
                                      _heap->marking_context() : _heap->complete_marking_context();

      ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
      assert(r->is_cset(), "sanity");

      // Begin the assist scan immediately after the object we just copied.
      HeapWord* cur = cast_from_oop<HeapWord*>(obj) + obj->size();

      size_t count = 0;
      while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
        oop cur_oop = oop(cur);
        // Evacuate only objects that have not been forwarded already.
        if (cur_oop == resolve_forwarded_not_null_mutator(cur_oop)) {
          _heap->evacuate_object(cur_oop, thread);
        }
        cur = cur + cur_oop->size();
      }
    }

    fwd = res_oop;
  }

  if (load_addr != NULL && fwd != obj) {
    // Since we are here and we know the load address, update the reference.
    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
  }

  return fwd;
}
 193 
// Common slow path of the load-reference-barrier. Resolves the forwardee of
// obj; if obj is in the collection set and not yet forwarded while
// evacuation (or traversal) is in progress, evacuates it here on behalf of
// the caller. NULL objects are returned untouched.
oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) {
  assert(ShenandoahLoadRefBarrier, "should be enabled");
  if (!CompressedOops::is_null(obj)) {
    bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
    oop fwd = resolve_forwarded_not_null(obj);
    if (evac_in_progress &&
        _heap->in_collection_set(obj) &&
        obj == fwd) {
      // In-cset and not yet copied: evacuate now, under OOM-during-evac scope.
      Thread *t = Thread::current();
      ShenandoahEvacOOMScope oom_evac_scope;
      return _heap->evacuate_object(obj, t);
    } else {
      return fwd;
    }
  } else {
    return obj;
  }
}
 212 
// Allocate Shenandoah's per-thread data as soon as a thread is created.
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
  // Create thread local data
  ShenandoahThreadLocalData::create(thread);
}
 217 
// Release Shenandoah's per-thread data when the thread goes away.
void ShenandoahBarrierSet::on_thread_destroy(Thread* thread) {
  // Destroy thread local data
  ShenandoahThreadLocalData::destroy(thread);
}
 222 
// Called when a thread attaches to the VM: activate its SATB queue to match
// the global SATB state, and initialize Java-thread-local GC state (the
// cached gc-state byte and the GCLAB).
void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
  assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(),
         "We should not be at a safepoint");
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  assert(!queue.is_active(), "SATB queue should not be active");
  assert( queue.is_empty(),  "SATB queue should be empty");
  // Mirror the global SATB activation state into the thread's queue.
  queue.set_active(_satb_mark_queue_set.is_active());
  if (thread->is_Java_thread()) {
    ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
}
 235 
// Called when a thread detaches from the VM: push any pending SATB entries
// to the global queue set and retire the thread's GCLAB, if it has one.
void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
  SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
  queue.flush();
  if (thread->is_Java_thread()) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    if (gclab != NULL) {
      gclab->retire();
    }
  }
}
 246 
 247 oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, oop* load_addr) {
 248   return load_reference_barrier_native_impl(obj, load_addr);
 249 }
 250 
 251 oop ShenandoahBarrierSet::load_reference_barrier_native(oop obj, narrowOop* load_addr) {
 252   return load_reference_barrier_native_impl(obj, load_addr);
 253 }
 254 
// Load-reference-barrier for oops loaded from native (off-heap) roots.
// While concurrent root processing is in progress, an object that is not
// marked may be concurrently unloaded: Java threads then observe NULL,
// while non-Java threads still get the object itself. Otherwise resolve
// through the regular LRB and heal the slot if the address is known.
template <class T>
oop ShenandoahBarrierSet::load_reference_barrier_native_impl(oop obj, T* load_addr) {
  if (CompressedOops::is_null(obj)) {
    return NULL;
  }

  ShenandoahMarkingContext* const marking_context = _heap->marking_context();
  if (_heap->is_concurrent_root_in_progress() && !marking_context->is_marked(obj)) {
    Thread* thr = Thread::current();
    if (thr->is_Java_thread()) {
      return NULL;
    } else {
      return obj;
    }
  }

  oop fwd = load_reference_barrier_not_null(obj);
  if (load_addr != NULL && fwd != obj) {
    // Since we are here and we know the load address, update the reference.
    ShenandoahHeap::cas_oop(fwd, load_addr, obj);
  }

  return fwd;
}
 279 
 280 void ShenandoahBarrierSet::clone_barrier_runtime(oop src) {
 281   if (_heap->has_forwarded_objects()) {
 282     clone_barrier(src);
 283   }
 284 }
 285