/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/management.hpp"
#include "utilities/macros.hpp"

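// Barrier arrive: atomically count this worker as having finished discovering
// the strong nmethods, and wake up any waiters once the last worker arrives.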
void G1RootProcessor::worker_has_discovered_all_strong_nmethods() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
  if (new_value == n_workers()) {
    // This thread is last. Notify the others.
    MonitorLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
    _lock.notify_all();
  }
}

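// Barrier wait: block until every worker has called
// worker_has_discovered_all_strong_nmethods(). The unlocked read is only a
// fast path; the condition is re-checked under the monitor before waiting.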
void G1RootProcessor::wait_until_all_strong_nmethods_discovered() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
    MonitorLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
      ml.wait(0);
    }
  }
}

G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scan barrier lock", false, Mutex::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}

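// Typical use, sketched and simplified (the actual call sites are in G1's
// evacuation task code): one G1RootProcessor is shared by all workers of a
// pause, and each worker calls in with its own per-thread state.
//
//   G1RootProcessor root_processor(g1h, n_workers);
//   // ... in each worker thread:
//   root_processor.evacuate_roots(pss, worker_id);
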
void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id) {
  G1GCPhaseTimes* phase_times = _g1h->phase_times();

  G1EvacPhaseTimesTracker timer(phase_times, pss, G1GCPhaseTimes::ExtRootScan, worker_id);

  G1EvacuationRootClosures* closures = pss->closures();
  process_java_roots(closures, phase_times, worker_id, closures->trace_metadata() /* notify_claimed_nmethods_done */);

  process_vm_roots(closures, phase_times, worker_id);

  {
    // Now the CM ref_processor roots.
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_refProcessor_oops_do)) {
      // We need to treat the discovered reference lists of the
      // concurrent mark ref processor as roots and keep entries
      // (which are added by the marking threads) on them live
      // until they can be processed at the end of marking.
      _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
    }
  }

  if (closures->trace_metadata()) {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongRoots, worker_id);
    // Wait to make sure all workers passed the strong nmethods phase.
    wait_until_all_strong_nmethods_discovered();
  }

  _process_strong_tasks.all_tasks_completed(n_workers());
}

// Adaptor to pass the closures to the strong roots in the VM.
class StrongRootsClosures : public G1RootClosures {
  OopClosure* _roots;
  CLDClosure* _clds;
  CodeBlobClosure* _blobs;
public:
  StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
      _roots(roots), _clds(clds), _blobs(blobs) {}

  OopClosure* weak_oops()   { return NULL; }
  OopClosure* strong_oops() { return _roots; }

  CLDClosure* weak_clds()        { return NULL; }
  CLDClosure* strong_clds()      { return _clds; }

  CodeBlobClosure* strong_codeblobs() { return _blobs; }
};

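// Apply the given closures to every strong root. Serial root sets are claimed
// through _process_strong_tasks, so if several workers call this concurrently
// each set is still processed exactly once.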
void G1RootProcessor::process_strong_roots(OopClosure* oops,
                                           CLDClosure* clds,
                                           CodeBlobClosure* blobs) {
  StrongRootsClosures closures(oops, clds, blobs);

  process_java_roots(&closures, NULL, 0);
  process_vm_roots(&closures, NULL, 0);

  _process_strong_tasks.all_tasks_completed(n_workers());
}

// Adaptor to pass the closures to all the roots in the VM.
class AllRootsClosures : public G1RootClosures {
  OopClosure* _roots;
  CLDClosure* _clds;
public:
  AllRootsClosures(OopClosure* roots, CLDClosure* clds) :
      _roots(roots), _clds(clds) {}

  OopClosure* weak_oops() { return _roots; }
  OopClosure* strong_oops() { return _roots; }
  // By returning the same CLDClosure for both weak and strong CLDs we ensure
  // that a single walk of the CLDG will invoke the closure on all CLDs in the
  // system.
  CLDClosure* weak_clds() { return _clds; }
  CLDClosure* strong_clds() { return _clds; }

  // We don't want to visit code blobs more than once, so we return NULL for the
  // strong case and walk the entire code cache as a separate step.
  CodeBlobClosure* strong_codeblobs() { return NULL; }
};

void G1RootProcessor::process_all_roots(OopClosure* oops,
                                        CLDClosure* clds,
                                        CodeBlobClosure* blobs) {
  AllRootsClosures closures(oops, clds);

  process_java_roots(&closures, NULL, 0);
  process_vm_roots(&closures, NULL, 0);

  process_code_cache_roots(blobs, NULL, 0);

  _process_strong_tasks.all_tasks_completed(n_workers());
}

void G1RootProcessor::process_java_roots(G1RootClosures* closures,
                                         G1GCPhaseTimes* phase_times,
                                         uint worker_id,
                                         bool notify_claimed_nmethods_done) {
  // We need to make sure that the "strong" nmethods are processed first
  // using the strong closure. Only after that do we process the weakly reachable
  // nmethods.
  // We need to strictly separate the strong and weak nmethod processing because
  // any processing claims the nmethod, i.e. it will not be iterated again.
  // This means that if an nmethod is claimed by the weak processing first, the
  // strong processing will not happen, and the oops reachable through that
  // nmethod will not be marked properly.
  //
  // That is why we process strong nmethods first, synchronize all threads via a
  // barrier, and only then allow weak processing. To minimize the wait time at
  // that barrier we do the strong nmethod processing first, and immediately
  // afterwards indicate that the thread is done. Hopefully the other root
  // processing after nmethod processing takes long enough that there is no
  // need to wait.
  //
  // This is only required in the concurrent start pause with class unloading enabled.
  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_id);
    bool is_par = n_workers() > 1;
    Threads::possibly_parallel_oops_do(is_par,
                                       closures->strong_oops(),
                                       closures->strong_codeblobs());
  }

  // This is the point where this worker thread will not find more strong nmethods.
  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
  if (notify_claimed_nmethods_done) {
    worker_has_discovered_all_strong_nmethods();
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
      ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
    }
  }
}

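// Process the VM-internal root sets. Each set is a serial task claimed by
// exactly one worker; G1GCParPhaseTimesTracker records the per-worker timing
// (phase_times is NULL when called outside of an evacuation pause).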
void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
                                       G1GCPhaseTimes* phase_times,
                                       uint worker_id) {
  OopClosure* strong_roots = closures->strong_oops();

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_Universe_oops_do)) {
      Universe::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_JNIHandles_oops_do)) {
      JNIHandles::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_ObjectSynchronizer_oops_do)) {
      ObjectSynchronizer::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_Management_oops_do)) {
      Management::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_jvmti_oops_do)) {
      JvmtiExport::oops_do(strong_roots);
    }
  }

#if INCLUDE_AOT
  if (UseAOT) {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
      AOTLoader::oops_do(strong_roots);
    }
  }
#endif

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_id);
    if (_process_strong_tasks.try_claim_task(G1RP_PS_SystemDictionary_oops_do)) {
      SystemDictionary::oops_do(strong_roots);
    }
  }
}

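// Walk every blob in the code cache with the given closure. This root set is
// not subdivided, so a single worker claims and processes all of it.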
void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
                                               G1GCPhaseTimes* phase_times,
                                               uint worker_id) {
  if (_process_strong_tasks.try_claim_task(G1RP_PS_CodeCache_oops_do)) {
    CodeCache::blobs_do(code_closure);
  }
}

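// The number of worker threads participating in root processing.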
uint G1RootProcessor::n_workers() const {
  return _srs.n_threads();
}