/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "services/management.hpp"
#include "utilities/macros.hpp"

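// Called by each worker thread once it can no longer find new strong
// CLDs/nmethods. The last worker to check in notifies any workers blocked
// in wait_until_all_strong_classes_discovered().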
void G1RootProcessor::worker_has_discovered_all_strong_classes() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
  if (new_value == n_workers()) {
    // This thread is last. Notify the others.
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    _lock.notify_all();
  }
}

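// Blocks the calling worker until every worker has reported through
// worker_has_discovered_all_strong_classes(). The unsynchronized check is a
// fast path for the case where all workers have already arrived.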
void G1RootProcessor::wait_until_all_strong_classes_discovered() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
      _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}

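// A G1RootProcessor is set up per collection. The constructor resets the
// StringTable dead-entry counter; the destructor below finishes it once all
// root scanning that touches the string table is done.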
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    _par_state_string(StringTable::weak_storage()),
    _n_workers_discovered_strong_classes(0) {
  StringTable::reset_dead_counter();
}

G1RootProcessor::~G1RootProcessor() {
  StringTable::finish_dead_counter();
}

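// Evacuates the roots visible to this worker during an evacuation pause:
// the Java roots (CLDG and thread stacks), the VM internal roots, the string
// table, and the concurrent mark reference processor's discovered lists.
// When metadata is traced, the weak CLDs are handled in a second pass after
// all workers have finished the strong CLD/nmethod phase.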
void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_i) {
  G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();

  G1EvacPhaseTimesTracker timer(phase_times, pss, G1GCPhaseTimes::ExtRootScan, worker_i);

  G1EvacuationRootClosures* closures = pss->closures();
  process_java_roots(closures, phase_times, worker_i);

  // This is the point where this worker thread will not find more strong CLDs/nmethods.
  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
  if (closures->trace_metadata()) {
    worker_has_discovered_all_strong_classes();
  }

  process_vm_roots(closures, phase_times, worker_i);
  process_string_table_roots(closures, phase_times, worker_i);

  {
    // Now the CM ref_processor roots.
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
      // We need to treat the discovered reference lists of the
      // concurrent mark ref processor as roots and keep entries
      // (which are added by the marking threads) on them live
      // until they can be processed at the end of marking.
      _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
    }
  }

  if (closures->trace_metadata()) {
    {
      G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
      // Barrier to make sure all workers passed
      // the strong CLD and strong nmethods phases.
      wait_until_all_strong_classes_discovered();
    }

    // Now take the complement of the strong CLDs.
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
    assert(closures->second_pass_weak_clds() != NULL, "Should be non-null if we are tracing metadata.");
    ClassLoaderDataGraph::roots_cld_do(NULL, closures->second_pass_weak_clds());
  } else {
    phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
    phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
    assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
  }

  // During concurrent marking we have to filter the per-thread SATB buffers
  // to make sure we remove any oops that point into the CSet (which will
  // show up as implicitly live).
  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
      G1BarrierSet::satb_mark_queue_set().filter_thread_buffers();
    }
  }

  _process_strong_tasks.all_tasks_completed(n_workers());
}

// Adaptor to pass the closures to the strong roots in the VM.
class StrongRootsClosures : public G1RootClosures {
  OopClosure* _roots;
  CLDClosure* _clds;
  CodeBlobClosure* _blobs;
public:
  StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
      _roots(roots), _clds(clds), _blobs(blobs) {}

  OopClosure* weak_oops()   { return NULL; }
  OopClosure* strong_oops() { return _roots; }

  CLDClosure* weak_clds()        { return NULL; }
  CLDClosure* strong_clds()      { return _clds; }

  CodeBlobClosure* strong_codeblobs() { return _blobs; }
};

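// Applies the given closures to the strong Java and VM roots only: weak oops,
// weak CLDs, the string table and the full code cache walk are skipped (code
// blobs are only visited through the thread stacks).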
void G1RootProcessor::process_strong_roots(OopClosure* oops,
                                           CLDClosure* clds,
                                           CodeBlobClosure* blobs) {
  StrongRootsClosures closures(oops, clds, blobs);

  process_java_roots(&closures, NULL, 0);
  process_vm_roots(&closures, NULL, 0);

  _process_strong_tasks.all_tasks_completed(n_workers());
}

// Adaptor to pass the closures to all the roots in the VM.
class AllRootsClosures : public G1RootClosures {
  OopClosure* _roots;
  CLDClosure* _clds;
public:
  AllRootsClosures(OopClosure* roots, CLDClosure* clds) :
      _roots(roots), _clds(clds) {}

  OopClosure* weak_oops() { return _roots; }
  OopClosure* strong_oops() { return _roots; }

  // By returning the same CLDClosure for both weak and strong CLDs we ensure
  // that a single walk of the CLDG will invoke the closure on all CLDs in the
  // system.
  CLDClosure* weak_clds() { return _clds; }
  CLDClosure* strong_clds() { return _clds; }

  // We don't want to visit code blobs more than once, so we return NULL for the
  // strong case and walk the entire code cache as a separate step.
  CodeBlobClosure* strong_codeblobs() { return NULL; }
};

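// Applies the given closures to all roots in the VM, both strong and weak.
// The string table scan can be skipped by the caller; the code cache is
// walked as a separate, final step (see AllRootsClosures::strong_codeblobs()).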
void G1RootProcessor::process_all_roots(OopClosure* oops,
                                        CLDClosure* clds,
                                        CodeBlobClosure* blobs,
                                        bool process_string_table) {
  AllRootsClosures closures(oops, clds);

  process_java_roots(&closures, NULL, 0);
  process_vm_roots(&closures, NULL, 0);

  if (process_string_table) {
    process_string_table_roots(&closures, NULL, 0);
  }
  process_code_cache_roots(blobs, NULL, 0);

  _process_strong_tasks.all_tasks_completed(n_workers());
}

void G1RootProcessor::process_all_roots(OopClosure* oops,
                                        CLDClosure* clds,
                                        CodeBlobClosure* blobs) {
  process_all_roots(oops, clds, blobs, true);
}

void G1RootProcessor::process_all_roots_no_string_table(OopClosure* oops,
                                                        CLDClosure* clds,
                                                        CodeBlobClosure* blobs) {
  assert(!ClassUnloading, "Should only be used when class unloading is disabled");
  process_all_roots(oops, clds, blobs, false);
}

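// Processes the Java-level roots: the class loader data graph and the thread
// stacks (together with the nmethods referenced from them). The CLDG scan is
// a sub-task claimed by one worker; the thread scan is distributed over the
// workers via Threads::possibly_parallel_oops_do().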
void G1RootProcessor::process_java_roots(G1RootClosures* closures,
                                         G1GCPhaseTimes* phase_times,
                                         uint worker_i) {
  // Iterating over the CLDG and the Threads is done early to allow us to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the worker threads process the weak CLDs and nmethods.
  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
      ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
    bool is_par = n_workers() > 1;
    Threads::possibly_parallel_oops_do(is_par,
                                       closures->strong_oops(),
                                       closures->strong_codeblobs());
  }
}

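// Processes the VM-internal strong roots: Universe, JNI handles, the object
// synchronizer, management, JVMTI, AOT code (when enabled) and the system
// dictionary. Each source is a separate sub-task claimed by exactly one
// worker and timed under its own phase.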
void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
                                       G1GCPhaseTimes* phase_times,
                                       uint worker_i) {
  OopClosure* strong_roots = closures->strong_oops();

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
      Universe::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
      JNIHandles::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ObjectSynchronizer_oops_do)) {
      ObjectSynchronizer::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
      Management::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
      JvmtiExport::oops_do(strong_roots);
    }
  }

#if INCLUDE_AOT
  if (UseAOT) {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_aot_oops_do)) {
      AOTLoader::oops_do(strong_roots);
    }
  }
#endif

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
      SystemDictionary::oops_do(strong_roots);
    }
  }
}

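// Scans the interned string table. The table is treated as a weak root set,
// so this is only called when weak oops are processed as well (hence the
// assert below); the work is split into bucket chunks claimed in parallel.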
void G1RootProcessor::process_string_table_roots(G1RootClosures* closures,
                                                 G1GCPhaseTimes* phase_times,
                                                 uint worker_i) {
  assert(closures->weak_oops() != NULL, "Should only be called when all roots are processed");
  G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
  // All threads execute the following. The individual tasks are specific
  // chunks of buckets from the StringTable.
  StringTable::possibly_parallel_oops_do(&_par_state_string, closures->weak_oops());
}

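// Walks every blob in the code cache with the given closure. The walk is a
// single sub-task, so only one worker performs it.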
void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
                                               G1GCPhaseTimes* phase_times,
                                               uint worker_i) {
  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
    CodeCache::blobs_do(code_closure);
  }
}

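// Processes the weak roots during a full GC: the discovered lists of the
// STW reference processor and the weak oop storages handled by the
// WeakProcessor.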
void G1RootProcessor::process_full_gc_weak_roots(OopClosure* oops) {
  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
    _g1h->ref_processor_stw()->weak_oops_do(oops);
  }

  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_weakProcessor_oops_do)) {
    WeakProcessor::oops_do(oops);
  }
}

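// Number of workers participating in root processing; this is the worker
// count passed to the constructor.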
uint G1RootProcessor::n_workers() const {
  return _srs.n_threads();
}