1 /*
   2  * Copyright (c) 2015, 2017, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahStringDedup.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "runtime/fprofiler.hpp"
  39 #include "runtime/mutex.hpp"
  40 #include "services/management.hpp"
  41 
// Sets up a parallel root scan for n_workers threads: allocates the
// once-per-scan task claim tracker, snapshots code-cache and monitor
// iterators, and opens the per-phase worker timing window.
ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers,
                                                 ShenandoahPhaseTimings::Phase phase) :
  // Tracks which serial sub-tasks (Universe, JNI, etc.) have been claimed,
  // so each is executed by exactly one worker during this scan.
  _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
  _srs(heap, true),
  _phase(phase),
  _coderoots_all_iterator(ShenandoahCodeRoots::iterator()),
  _om_iterator(ObjectSynchronizer::parallel_iterator())
{
  heap->phase_timings()->record_workers_start(_phase);
  _process_strong_tasks->set_n_threads(n_workers);
  heap->set_par_threads(n_workers);

  // Reset string dedup claim marks so workers can re-claim dedup
  // table partitions for this scan.
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::clear_claimed();
  }
}
  58 
  59 ShenandoahRootProcessor::~ShenandoahRootProcessor() {
  60   delete _process_strong_tasks;
  61   ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
  62 }
  63 
// Serial, exhaustive walk of every root category (strong and weak) with a
// single oop closure. Used for slow paths such as verification; no task
// claiming or per-root timing is done here.
void ShenandoahRootProcessor::process_all_roots_slow(OopClosure* oops) {
  // Treat all JNI weak refs as alive so the weak handles are visited too.
  ShenandoahAlwaysTrueClosure always_true;

  CLDToOopClosure clds(oops);
  // Verification-style walk: do not fix relocations in code blobs.
  CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);

  CodeCache::blobs_do(&blobs);
  ClassLoaderDataGraph::cld_do(&clds);
  Universe::oops_do(oops);
  FlatProfiler::oops_do(oops);
  Management::oops_do(oops);
  JvmtiExport::oops_do(oops);
  JNIHandles::oops_do(oops);
  JNIHandles::weak_oops_do(&always_true, oops);
  ObjectSynchronizer::oops_do(oops);
  // Same closure for both strong and weak system dictionary roots.
  SystemDictionary::roots_oops_do(oops, oops);
  StringTable::oops_do(oops);

  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::oops_do_slow(oops);
  }

  // Do thread roots the last. This allows verification code to find
  // any broken objects from those special roots first, not the accidental
  // dangling reference from the thread root.
  Threads::possibly_parallel_oops_do(oops, &clds, &blobs);
}
  91 
// Parallel scan over strong roots only. Java roots (CLDG, threads) are
// processed first, then the claimed VM roots; code cache roots are left to
// the thread walk (blobs) rather than the all-coderoots iterator.
// thread_cl must be NULL — per-thread closures are not implemented here.
void ShenandoahRootProcessor::process_strong_roots(OopClosure* oops,
                                                   OopClosure* weak_oops,
                                                   CLDClosure* clds,
                                                   CLDClosure* weak_clds,
                                                   CodeBlobClosure* blobs,
                                                   ThreadClosure* thread_cl,
                                                   uint worker_id) {
  assert(thread_cl == NULL, "not implemented yet");
  process_java_roots(oops, clds, clds, weak_clds, blobs, worker_id);
  // No separate weak oop closure for VM roots in a strong-only scan.
  process_vm_roots(oops, NULL, weak_oops, worker_id);

  // Wait for all claimed sub-tasks to finish before returning.
  _process_strong_tasks->all_tasks_completed();
}
 105 
// Parallel scan over all roots, strong and weak. Unlike process_strong_roots,
// code cache roots are walked through the all-coderoots iterator (when blobs
// is non-NULL) and the strong oop closure is also applied as the weak VM
// root closure. thread_cl must be NULL — not implemented.
void ShenandoahRootProcessor::process_all_roots(OopClosure* oops,
                                                OopClosure* weak_oops,
                                                CLDClosure* clds,
                                                CodeBlobClosure* blobs,
                                                ThreadClosure* thread_cl,
                                                uint worker_id) {

  assert(thread_cl == NULL, "not implemented yet");
  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  // No thread CLD closure and no per-thread code closure: code roots are
  // covered below by the iterator, and CLDs via the CLDG walk.
  process_java_roots(oops, NULL, clds, clds, NULL, worker_id);
  process_vm_roots(oops, oops, weak_oops, worker_id);

  if (blobs != NULL) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
    _coderoots_all_iterator.possibly_parallel_blobs_do(blobs);
  }

  // Wait for all claimed sub-tasks to finish before returning.
  _process_strong_tasks->all_tasks_completed();
}
 125 
// Processes the Java-side roots: the class loader data graph and the thread
// stacks. Each part is timed separately per worker.
void ShenandoahRootProcessor::process_java_roots(OopClosure* strong_roots,
                                                 CLDClosure* thread_clds,
                                                 CLDClosure* strong_clds,
                                                 CLDClosure* weak_clds,
                                                 CodeBlobClosure* strong_code,
                                                 uint worker_id)
{
  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  // Iterating over the CLDG and the Threads are done early to allow us to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the thread process the weak CLDs and nmethods.
  {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CLDGRoots, worker_id);
    _cld_iterator.root_cld_do(strong_clds, weak_clds);
  }

  {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
    // ResourceMark scopes any resource-area allocation done by the thread walk.
    ResourceMark rm;
    Threads::possibly_parallel_oops_do(strong_roots, thread_clds, strong_code);
  }
}
 148 
// Processes VM-internal roots. Serial root sets (Universe, JNI handles,
// profiler, management, JVMTI, system dictionary) are claimed via
// _process_strong_tasks so each is walked by exactly one worker; monitor and
// string table roots are walked in parallel by all workers. weak_roots and
// jni_weak_roots may be NULL, in which case the corresponding weak sets are
// skipped.
void ShenandoahRootProcessor::process_vm_roots(OopClosure* strong_roots,
                                               OopClosure* weak_roots,
                                               OopClosure* jni_weak_roots,
                                               uint worker_id)
{
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahWorkerTimings* worker_times = heap->phase_timings()->worker_times();
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Universe_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::UniverseRoots, worker_id);
    Universe::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JNIRoots, worker_id);
    JNIHandles::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_FlatProfiler_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FlatProfilerRoots, worker_id);
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_Management_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ManagementRoots, worker_id);
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_jvmti_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JVMTIRoots, worker_id);
    JvmtiExport::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_SystemDictionary_oops_do)) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::SystemDictionaryRoots, worker_id);
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // Note: Workaround bugs with JNI weak reference handling during concurrent cycles,
  // by pessimistically assuming all JNI weak refs are alive. Achieve this by passing
  // stronger closure, where weaker one would suffice otherwise. This effectively makes
  // JNI weak refs non-reclaimable by concurrent GC, but they would be reclaimed by
  // STW GCs, that are not affected by the bug, nevertheless.
  if (!heap->is_full_gc_in_progress() && !heap->is_degenerated_gc_in_progress()) {
    jni_weak_roots = strong_roots;
  }

  if (jni_weak_roots != NULL) {
    if (!_process_strong_tasks->is_task_claimed(SHENANDOAH_RP_PS_JNIHandles_weak_oops_do)) {
      // Visit every weak handle regardless of liveness (always-true predicate).
      ShenandoahAlwaysTrueClosure always_true;
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JNIWeakRoots, worker_id);
      JNIHandles::weak_oops_do(&always_true, jni_weak_roots);
    }
  }

  if (ShenandoahStringDedup::is_enabled() && weak_roots != NULL) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::StringDedupRoots, worker_id);
    ShenandoahStringDedup::parallel_oops_do(weak_roots);
  }

  {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ObjectSynchronizerRoots, worker_id);
    if (ShenandoahFastSyncRoots && MonitorInUseLists) {
      ObjectSynchronizer::oops_do(strong_roots);
    } else {
      // Drain monitor chunks until the parallel iterator reports none left.
      while(_om_iterator.parallel_oops_do(strong_roots));
    }
  }
  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::StringTableRoots, worker_id);
    StringTable::possibly_parallel_oops_do(weak_roots);
  }
}
 221 
// Sets up parallel root evacuation for n_workers threads and opens the
// per-phase worker timing window.
ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers, ShenandoahPhaseTimings::Phase phase) :
  _srs(heap, true),
  _phase(phase),
  // Only code roots pointing into the collection set need to be visited
  // during evacuation.
  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator())
{
  heap->set_par_threads(n_workers);
  heap->phase_timings()->record_workers_start(_phase);
}
 230 
 231 ShenandoahRootEvacuator::~ShenandoahRootEvacuator() {
 232   ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
 233 }
 234 
// Evacuates roots: first force-evacuates the pending list lock (PLL), then
// walks thread stacks and collection-set code roots with the given closures.
// Every worker executes the PLL write barrier — see comment below for why.
void ShenandoahRootEvacuator::process_evacuate_roots(OopClosure* oops,
                                                     CodeBlobClosure* blobs,
                                                     uint worker_id) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    // Evacuate the PLL here so that the SurrogateLockerThread doesn't
    // have to. SurrogateLockerThread can execute write barrier in VMOperation
    // prolog. If the SLT runs into OOM during that evacuation, the VMOperation
    // may deadlock. Doing this evacuation the first thing makes that critical
    // OOM less likely to happen.  It is a bit excessive to perform WB by all
    // threads, but this guarantees the very first evacuation would be the PLL.
    //
    // This pre-evac can still silently fail with OOME here, and PLL would not
    // get evacuated. This would mean next VMOperation would try to evac PLL in
    // SLT thread. We make additional effort to recover from that OOME in SLT,
    // see ShenandoahHeap::oom_during_evacuation(). It seems to be the lesser evil
    // to do there, because we cannot trigger Full GC right here, when we are
    // in another VMOperation.

    oop pll = java_lang_ref_Reference::pending_list_lock();
    // Return value deliberately ignored: this is a best-effort pre-evacuation.
    oopDesc::bs()->write_barrier(pll);
  }

  ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
  {
    // ResourceMark scopes any resource-area allocation done by the thread walk.
    ResourceMark rm;
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
    // No CLD or code closure here: only thread-stack oops are evacuated.
    Threads::possibly_parallel_oops_do(oops, NULL, NULL);
  }

  if (blobs != NULL) {
    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
    _coderoots_cset_iterator.possibly_parallel_blobs_do(blobs);
  }
}
 271 
// Implementation of ParallelCLDRootIterator
 273 ParallelCLDRootIterator::ParallelCLDRootIterator() {
 274   assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
 275   ClassLoaderDataGraph::clear_claimed_marks();
 276 }
 277 
 278 void ParallelCLDRootIterator::root_cld_do(CLDClosure* strong, CLDClosure* weak) {
 279     ClassLoaderDataGraph::roots_cld_do(strong, weak);
 280 }