1 /*
   2  * Copyright (c) 2015, 2017, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "runtime/fprofiler.hpp"
  38 #include "runtime/mutex.hpp"
  39 #include "services/management.hpp"
  40 
// Set up parallel root scanning for n_workers threads under the given timing
// phase. Worker start time is recorded here; the matching end record is in
// the destructor.
ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers,
                                                 ShenandoahPhaseTimings::Phase phase) :
  // One SubTasksDone slot per single-shot VM root group, so each group is
  // claimed by exactly one worker (see process_vm_roots).
  _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
  _srs(heap, true),
  _phase(phase),
  _coderoots_all_iterator(ShenandoahCodeRoots::iterator()),
  _om_iterator(ObjectSynchronizer::parallel_iterator())
{
  heap->phase_timings()->record_workers_start(_phase);
  _process_strong_tasks->set_n_threads(n_workers);
  heap->set_par_threads(n_workers);
}
  53 
  54 ShenandoahRootProcessor::~ShenandoahRootProcessor() {
  55   delete _process_strong_tasks;
  56   ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
  57 }
  58 
  59 void ShenandoahRootProcessor::process_all_roots_slow(OopClosure* oops) {
  60   ShenandoahAlwaysTrueClosure always_true;
  61 
  62   CLDToOopClosure clds(oops);
  63   CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
  64 
  65   CodeCache::blobs_do(&blobs);
  66   ClassLoaderDataGraph::cld_do(&clds);
  67   Universe::oops_do(oops);
  68   FlatProfiler::oops_do(oops);
  69   Management::oops_do(oops);
  70   JvmtiExport::oops_do(oops);
  71   JNIHandles::oops_do(oops);
  72   JNIHandles::weak_oops_do(&always_true, oops);
  73   ObjectSynchronizer::oops_do(oops);
  74   SystemDictionary::roots_oops_do(oops, oops);
  75   StringTable::oops_do(oops);
  76 
  77   // Do thread roots the last. This allows verification code to find
  78   // any broken objects from those special roots first, not the accidental
  79   // dangling reference from the thread root.
  80   Threads::possibly_parallel_oops_do(oops, &clds, &blobs);
  81 }
  82 
// Process only the strong roots: Java roots (CLDs, threads, code) followed by
// the single-shot VM root groups. Strong-root code blobs are visited; the
// all-code-roots iterator is not. NULL is passed as the strong_roots argument
// of process_vm_roots so weak VM oop groups are skipped there.
void ShenandoahRootProcessor::process_strong_roots(OopClosure* oops,
                                                   OopClosure* weak_oops,
                                                   CLDClosure* clds,
                                                   CLDClosure* weak_clds,
                                                   CodeBlobClosure* blobs,
                                                   ThreadClosure* thread_cl,
                                                   uint worker_id) {
  assert(thread_cl == NULL, "not implemented yet");
  process_java_roots(oops, clds, clds, weak_clds, blobs, worker_id);
  // NOTE(review): second arg (weak_roots) is NULL here, so StringTable and the
  // weak half of SystemDictionary are not scanned — strong-only by design.
  process_vm_roots(oops, NULL, weak_oops, worker_id);

  // Rendezvous: wait until all workers have finished the claimed sub-tasks.
  _process_strong_tasks->all_tasks_completed();
}
  96 
  97 void ShenandoahRootProcessor::process_all_roots(OopClosure* oops,
  98                                                 OopClosure* weak_oops,
  99                                                 CLDClosure* clds,
 100                                                 CodeBlobClosure* blobs,
 101                                                 ThreadClosure* thread_cl,
 102                                                 uint worker_id) {
 103 
 104   assert(thread_cl == NULL, "not implemented yet");
 105   ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
 106   process_java_roots(oops, NULL, clds, clds, NULL, worker_id);
 107   process_vm_roots(oops, oops, weak_oops, worker_id);
 108 
 109   if (blobs != NULL) {
 110     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 111     _coderoots_all_iterator.possibly_parallel_blobs_do(blobs);
 112   }
 113 
 114   _process_strong_tasks->all_tasks_completed();
 115 }
 116 
// Walk Java-level roots: the ClassLoaderDataGraph (strong and weak CLDs) and
// all Java thread stacks (oops, per-thread CLDs, and on-stack nmethods).
// Timings are attributed per worker under CLDGRoots and ThreadRoots.
void ShenandoahRootProcessor::process_java_roots(OopClosure* strong_roots,
                                                 CLDClosure* thread_clds,
                                                 CLDClosure* strong_clds,
                                                 CLDClosure* weak_clds,
                                                 CodeBlobClosure* strong_code,
                                                 uint worker_id)
{
  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  // Iterating over the CLDG and the Threads are done early to allow us to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the thread process the weak CLDs and nmethods.
  {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CLDGRoots, worker_id);
    _cld_iterator.root_cld_do(strong_clds, weak_clds);
  }

  {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
    // ResourceMark bounds resource-area allocations made during the stack walk.
    ResourceMark rm;
    Threads::possibly_parallel_oops_do(strong_roots, thread_clds, strong_code);
  }
}
 139 
// Walk VM-internal roots. Single-shot groups (Universe, JNI handles, profiler,
// management, JVMTI, SystemDictionary, JNI weak handles) are claimed via
// SubTasksDone so exactly one worker processes each; ObjectSynchronizer and
// StringTable are walked by all workers, each claiming its own chunk.
// Weak groups run only when their closure argument is non-NULL.
void ShenandoahRootProcessor::process_vm_roots(OopClosure* strong_roots,
                                               OopClosure* weak_roots,
                                               OopClosure* jni_weak_roots,
                                               uint worker_id)
{
  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Universe_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::UniverseRoots, worker_id);
    Universe::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JNIRoots, worker_id);
    JNIHandles::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_FlatProfiler_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FlatProfilerRoots, worker_id);
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Management_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ManagementRoots, worker_id);
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_jvmti_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JVMTIRoots, worker_id);
    JvmtiExport::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_SystemDictionary_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::SystemDictionaryRoots, worker_id);
    // Strong and weak halves of the dictionary; weak_roots may be NULL on
    // strong-only walks.
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }
  if (jni_weak_roots != NULL) {
    if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_weak_oops_do)) {
      // Visit every JNI weak handle regardless of referent liveness.
      ShenandoahAlwaysTrueClosure always_true;
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JNIWeakRoots, worker_id);
      JNIHandles::weak_oops_do(&always_true, jni_weak_roots);
    }
  }

  {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ObjectSynchronizerRoots, worker_id);
    if (ShenandoahFastSyncRoots && MonitorInUseLists) {
      // Fast path: walk only in-use monitor lists, single pass.
      ObjectSynchronizer::oops_do(strong_roots);
    } else {
      // Parallel path: keep pulling monitor chunks until the iterator is drained.
      while(_om_iterator.parallel_oops_do(strong_roots));
    }
  }
  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::StringTableRoots, worker_id);
    StringTable::possibly_parallel_oops_do(weak_roots);
  }
}
 195 
// Set up parallel root evacuation for n_workers under the given timing phase.
// Only collection-set code roots are iterated (cset_iterator), since
// evacuation needs to touch just blobs that may point into the cset.
ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers, ShenandoahPhaseTimings::Phase phase) :
  _srs(heap, true),
  _phase(phase),
  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator())
{
  heap->set_par_threads(n_workers);
  heap->phase_timings()->record_workers_start(_phase);
}
 204 
 205 ShenandoahRootEvacuator::~ShenandoahRootEvacuator() {
 206   ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
 207 }
 208 
// Evacuate roots: first force-evacuate the pending list lock (PLL), then walk
// thread stacks, then (optionally) collection-set code roots. The oops closure
// is expected to evacuate/update each visited root.
void ShenandoahRootEvacuator::process_evacuate_roots(OopClosure* oops,
                                                     CodeBlobClosure* blobs,
                                                     uint worker_id) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    // Evacuate the PLL here so that the SurrogateLockerThread doesn't
    // have to. SurrogateLockerThread can execute write barrier in VMOperation
    // prolog. If the SLT runs into OOM during that evacuation, the VMOperation
    // may deadlock. Doing this evacuation the first thing makes that critical
    // OOM less likely to happen.  It is a bit excessive to perform WB by all
    // threads, but this guarantees the very first evacuation would be the PLL.
    //
    // This pre-evac can still silently fail with OOME here, and PLL would not
    // get evacuated. This would mean next VMOperation would try to evac PLL in
    // SLT thread. We make additional effort to recover from that OOME in SLT,
    // see ShenandoahHeap::oom_during_evacuation(). It seems to be the lesser evil
    // to do there, because we cannot trigger Full GC right here, when we are
    // in another VMOperation.

    // Leave the evac-OOM scope so a failed PLL evacuation does not trip the
    // OOM-during-evac protocol for this worker.
    ShenandoahEvacOOMScopeLeaver leaver;
    oop pll = java_lang_ref_Reference::pending_list_lock();
    // Write barrier evacuates the object if it is in the collection set.
    oopDesc::bs()->write_barrier(pll);
  }

  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  {
    // ResourceMark bounds resource-area allocations made during the stack walk.
    ResourceMark rm;
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
    // No CLD or code closures here: only thread-stack oops need evacuation.
    Threads::possibly_parallel_oops_do(oops, NULL, NULL);
  }

  if (blobs != NULL) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
    _coderoots_cset_iterator.possibly_parallel_blobs_do(blobs);
  }
}
 246 
// Implementation of ParallelCLDRootIterator
 248 ParallelCLDRootIterator::ParallelCLDRootIterator() {
 249   assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
 250   ClassLoaderDataGraph::clear_claimed_marks();
 251 }
 252 
 253 void ParallelCLDRootIterator::root_cld_do(CLDClosure* strong, CLDClosure* weak) {
 254     ClassLoaderDataGraph::roots_cld_do(strong, weak);
 255 }