1 /*
   2  * Copyright (c) 2019, 2020, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP
  26 #define SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP
  27 
  28 #include "classfile/classLoaderDataGraph.hpp"
  29 #include "classfile/stringTable.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "gc/shared/oopStorageParState.inline.hpp"
  32 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  33 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
  34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  35 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  36 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  37 #include "gc/shenandoah/shenandoahUtils.hpp"
  38 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "prims/resolvedMethodTable.hpp"
  41 #include "runtime/safepoint.hpp"
  42 
// Wraps a single OopStorage together with the timing phase / parallel
// sub-phase its iteration is attributed to.
template <bool CONCURRENT>
inline ShenandoahVMRoot<CONCURRENT>::ShenandoahVMRoot(OopStorage* storage,
        ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase) :
  _itr(storage), _phase(phase), _par_phase(par_phase) {
}
  48 
// Applies the closure to all oops in the underlying storage, accounting the
// work to (_phase, _par_phase) on behalf of the given worker.
template <bool CONCURRENT>
template <typename Closure>
inline void ShenandoahVMRoot<CONCURRENT>::oops_do(Closure* cl, uint worker_id) {
  ShenandoahWorkerTimingsTracker timer(_phase, _par_phase, worker_id);
  _itr.oops_do(cl);
}
  55 
// Concurrent weak root: iteration itself is identical to ShenandoahVMRoot;
// the distinct type marks the storage as holding weak references.
template <bool CONCURRENT>
inline ShenandoahWeakRoot<CONCURRENT>::ShenandoahWeakRoot(OopStorage* storage,
  ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase) :
  ShenandoahVMRoot<CONCURRENT>(storage, phase, par_phase) {
}
  61 
// Non-concurrent (at-safepoint) weak root. Keeps its own iterator so it can
// offer is_alive-filtered iteration via weak_oops_do() below.
inline ShenandoahWeakRoot<false>::ShenandoahWeakRoot(OopStorage* storage,
  ShenandoahPhaseTimings::Phase phase,  ShenandoahPhaseTimings::ParPhase par_phase) :
  _itr(storage), _phase(phase), _par_phase(par_phase) {
}
  66 
// Visits weak oops with an is_alive/keep_alive closure pair; the exact
// clearing semantics for dead referents are implemented by the iterator's
// weak_oops_do — this wrapper only adds per-worker timing.
template <typename IsAliveClosure, typename KeepAliveClosure>
void ShenandoahWeakRoot<false /* concurrent */>::weak_oops_do(IsAliveClosure* is_alive, KeepAliveClosure* keep_alive, uint worker_id) {
  ShenandoahWorkerTimingsTracker timer(_phase, _par_phase, worker_id);
  _itr.weak_oops_do(is_alive, keep_alive);
}
  72 
  73 template <bool CONCURRENT>
  74 ShenandoahWeakRoots<CONCURRENT>::ShenandoahWeakRoots() :
  75   _jni_roots(OopStorageSet::jni_weak(), ShenandoahPhaseTimings::JNIWeakRoots),
  76   _string_table_roots(OopStorageSet::string_table_weak(), ShenandoahPhaseTimings::StringTableRoots),
  77   _resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), ShenandoahPhaseTimings::ResolvedMethodTableRoots),
  78   _vm_roots(OopStorageSet::vm_weak(), ShenandoahPhaseTimings::VMWeakRoots) {
  79 }
  80 
// Applies the closure to each weak-root storage in turn; every sub-root does
// its own per-worker timing.
template <bool CONCURRENT>
template <typename Closure>
void ShenandoahWeakRoots<CONCURRENT>::oops_do(Closure* cl, uint worker_id) {
  _jni_roots.oops_do(cl, worker_id);
  _string_table_roots.oops_do(cl, worker_id);
  _resolved_method_table_roots.oops_do(cl, worker_id);
  _vm_roots.oops_do(cl, worker_id);
}
  89 
// At-safepoint variant: tags every weak-root storage with the owning phase
// plus its dedicated parallel sub-phase.
inline ShenandoahWeakRoots<false /* concurrent */>::ShenandoahWeakRoots(ShenandoahPhaseTimings::Phase phase) :
  _jni_roots(OopStorageSet::jni_weak(), phase, ShenandoahPhaseTimings::JNIWeakRoots),
  _string_table_roots(OopStorageSet::string_table_weak(), phase, ShenandoahPhaseTimings::StringTableRoots),
  _resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), phase, ShenandoahPhaseTimings::ResolvedMethodTableRoots),
  _vm_roots(OopStorageSet::vm_weak(), phase, ShenandoahPhaseTimings::VMWeakRoots) {
}
  96 
// Runs the is_alive/keep_alive pair over each weak-root storage in turn.
template <typename IsAliveClosure, typename KeepAliveClosure>
void ShenandoahWeakRoots<false /* concurrent*/>::weak_oops_do(IsAliveClosure* is_alive, KeepAliveClosure* keep_alive, uint worker_id) {
  _jni_roots.weak_oops_do(is_alive, keep_alive, worker_id);
  _string_table_roots.weak_oops_do(is_alive, keep_alive, worker_id);
  _resolved_method_table_roots.weak_oops_do(is_alive, keep_alive, worker_id);
  _vm_roots.weak_oops_do(is_alive, keep_alive, worker_id);
}
 104 
 105 template <typename Closure>
 106 void ShenandoahWeakRoots<false /* concurrent */>::oops_do(Closure* cl, uint worker_id) {
 107   AlwaysTrueClosure always_true;
 108   weak_oops_do<AlwaysTrueClosure, Closure>(&always_true, cl, worker_id);
 109 }
 110 
// Strong VM roots: JNI global handles plus the shared VM global storage.
template <bool CONCURRENT>
ShenandoahVMRoots<CONCURRENT>::ShenandoahVMRoots(ShenandoahPhaseTimings::Phase phase) :
  _jni_handle_roots(OopStorageSet::jni_global(), phase, ShenandoahPhaseTimings::JNIRoots),
  _vm_global_roots(OopStorageSet::vm_global(), phase, ShenandoahPhaseTimings::VMGlobalRoots) {
}
 116 
// Applies the closure to both strong VM root storages; each sub-root does its
// own per-worker timing.
template <bool CONCURRENT>
template <typename T>
void ShenandoahVMRoots<CONCURRENT>::oops_do(T* cl, uint worker_id) {
  _jni_handle_roots.oops_do(cl, worker_id);
  _vm_global_roots.oops_do(cl, worker_id);
}
 123 
// Prepares CLDG iteration. In parallel modes the CLDG claim marks are reset
// so each CLD is visited exactly once across workers. In concurrent mode the
// ClassLoaderDataGraph_lock is held for the lifetime of this object (released
// in the destructor), keeping the graph stable while workers iterate.
template <bool CONCURRENT, bool SINGLE_THREADED>
ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase, uint n_workers) :
  _semaphore(worker_count(n_workers)),
  _phase(phase) {
  if (!SINGLE_THREADED) {
    ClassLoaderDataGraph::clear_claimed_marks();
  }
  if (CONCURRENT) {
    ClassLoaderDataGraph_lock->lock();
  }
}
 135 
// Releases ClassLoaderDataGraph_lock taken in the constructor (concurrent
// mode only) — RAII pairing with the constructor above.
template <bool CONCURRENT, bool SINGLE_THREADED>
ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::~ShenandoahClassLoaderDataRoots() {
  if (CONCURRENT) {
    ClassLoaderDataGraph_lock->unlock();
  }
}
 142 
 143 
// Walks only the always-strong CLDs. Single-threaded mode must run in the VM
// thread at a safepoint. Otherwise workers race on the semaphore and only
// those that obtain a permit iterate (timed under CLDGRoots);
// claim_all() takes the remaining permits — presumably so late-starting
// workers skip the iteration once it is done; confirm against the
// ShenandoahSharedSemaphore implementation.
template <bool CONCURRENT, bool SINGLE_THREADED>
void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::always_strong_cld_do(CLDClosure* clds, uint worker_id) {
  if (SINGLE_THREADED) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
    ClassLoaderDataGraph::always_strong_cld_do(clds);
  } else if (_semaphore.try_acquire()) {
    ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id);
    ClassLoaderDataGraph::always_strong_cld_do(clds);
    _semaphore.claim_all();
  }
}
 156 
// Walks all CLDs; same threading protocol as always_strong_cld_do() above:
// single-threaded mode requires the VM thread at a safepoint, parallel mode
// gates participation on the semaphore and claims remaining permits when done.
template <bool CONCURRENT, bool SINGLE_THREADED>
void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::cld_do(CLDClosure* clds, uint worker_id) {
  if (SINGLE_THREADED) {
    assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
    ClassLoaderDataGraph::cld_do(clds);
  } else if (_semaphore.try_acquire()) {
    ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id);
    ClassLoaderDataGraph::cld_do(clds);
    _semaphore.claim_all();
  }
}
 169 
 170 class ShenandoahParallelOopsDoThreadClosure : public ThreadClosure {
 171 private:
 172   OopClosure* _f;
 173   CodeBlobClosure* _cf;
 174   ThreadClosure* _thread_cl;
 175 public:
 176   ShenandoahParallelOopsDoThreadClosure(OopClosure* f, CodeBlobClosure* cf, ThreadClosure* thread_cl) :
 177     _f(f), _cf(cf), _thread_cl(thread_cl) {}
 178 
 179   void do_thread(Thread* t) {
 180     if (_thread_cl != NULL) {
 181       _thread_cl->do_thread(t);
 182     }
 183     t->oops_do(_f, _cf);
 184   }
 185 };
 186 
// Sets up concurrent root scanning. When classes are not being unloaded the
// code cache is scanned as well: take CodeCache_lock (without safepoint check,
// since the CONCURRENT instantiation runs outside a safepoint; the
// non-concurrent one asserts it is at a safepoint instead) and snapshot the
// Shenandoah code roots table. The snapshot and the lock are released in the
// destructor.
template <bool CONCURRENT>
ShenandoahConcurrentRootScanner<CONCURRENT>::ShenandoahConcurrentRootScanner(uint n_workers,
                                                                             ShenandoahPhaseTimings::Phase phase) :
  _vm_roots(phase),
  _cld_roots(phase, n_workers),
  _dedup_roots(phase),
  _codecache_snapshot(NULL),
  _phase(phase) {
  if (!ShenandoahHeap::heap()->unload_classes()) {
    if (CONCURRENT) {
      CodeCache_lock->lock_without_safepoint_check();
    } else {
      assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
    }
    _codecache_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
  }
  assert(!CONCURRENT || !ShenandoahHeap::heap()->has_forwarded_objects(), "Not expecting forwarded pointers during concurrent marking");
}
 205 
// Tears down what the constructor set up: finish the code roots snapshot
// iteration and, in the concurrent case, release CodeCache_lock.
// NOTE(review): this re-queries unload_classes() rather than remembering
// whether a snapshot was taken — assumes the answer cannot change between
// constructor and destructor; confirm upstream.
template <bool CONCURRENT>
ShenandoahConcurrentRootScanner<CONCURRENT>::~ShenandoahConcurrentRootScanner() {
  if (!ShenandoahHeap::heap()->unload_classes()) {
    ShenandoahCodeRoots::table()->finish_iteration(_codecache_snapshot);
    if (CONCURRENT) {
      CodeCache_lock->unlock();
    }
  }
}
 215 
 216 template <bool CONCURRENT>
 217 void ShenandoahConcurrentRootScanner<CONCURRENT>::oops_do(OopClosure* oops, uint worker_id) {
 218   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 219   CLDToOopClosure clds_cl(oops, CONCURRENT ? ClassLoaderData::_claim_strong : ClassLoaderData::_claim_none);
 220   _vm_roots.oops_do(oops, worker_id);
 221 
 222   if (!heap->unload_classes()) {
 223     AlwaysTrueClosure always_true;
 224     _cld_roots.cld_do(&clds_cl, worker_id);
 225     _dedup_roots.oops_do(&always_true, oops, worker_id);
 226     ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 227     CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
 228     _codecache_snapshot->parallel_blobs_do(&blobs);
 229   } else {
 230     _cld_roots.always_strong_cld_do(&clds_cl, worker_id);
 231   }
 232 }
 233 
// Single pass over all roots: updates references via keep_alive, filters dead
// weak referents via is_alive. The disarm variant of the code blob closure is
// selected when concurrent class unloading is enabled (see
// ShenandoahCodeBlobAndDisarmClosure). Root categories are processed in
// serial -> light parallel -> heavy parallel order, as annotated below.
template <typename IsAlive, typename KeepAlive>
void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) {
  CodeBlobToOopClosure update_blobs(keep_alive, CodeBlobToOopClosure::FixRelocations);
  ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(keep_alive);
  CodeBlobToOopClosure* codes_cl = ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() ?
                                  static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
                                  static_cast<CodeBlobToOopClosure*>(&update_blobs);

  CLDToOopClosure clds(keep_alive, ClassLoaderData::_claim_strong);

  // Process serial-claiming roots first
  _serial_roots.oops_do(keep_alive, worker_id);
  _serial_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);

  // Process light-weight/limited parallel roots then
  _vm_roots.oops_do(keep_alive, worker_id);
  _weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
  _dedup_roots.oops_do(is_alive, keep_alive, worker_id);
  _cld_roots.cld_do(&clds, worker_id);

  // Process heavy-weight/fully parallel roots the last
  _code_roots.code_blobs_do(codes_cl, worker_id);
  // No extra per-thread closure here (NULL); threads' oops only.
  _thread_roots.oops_do(keep_alive, NULL, worker_id);
}
 258 
 259 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP