< prev index next >

src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.cpp

Print this page
rev 10690 : [backport] Cleanup header files and forward declarations
rev 10724 : [backport] Add JFR parallel and concurrent events (infrastructure)
rev 10752 : [backport] Pre-evac JVMTI roots + fixes
rev 10772 : [backport] Update copyrights
   1 /*
   2  * Copyright (c) 2015, 2017, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahStringDedup.hpp"

  37 #include "memory/allocation.inline.hpp"
  38 #include "runtime/fprofiler.hpp"
  39 #include "runtime/mutex.hpp"

  40 #include "services/management.hpp"
  41 
// Set up per-cycle root-scanning state shared by all GC workers for the given
// timing phase: serial-subtask bookkeeping, the code-root iterator and the
// ObjectMonitor iterator are all initialized here, before workers start.
ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers,
                                                 ShenandoahPhaseTimings::Phase phase) :
  _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
  _srs(heap, true),
  _phase(phase),
  _coderoots_all_iterator(ShenandoahCodeRoots::iterator()),
  _om_iterator(ObjectSynchronizer::parallel_iterator())
{
  // Start the worker timing window for this phase before any root work runs.
  heap->phase_timings()->record_workers_start(_phase);
  // Tell the subtask bookkeeping and the heap how many workers participate.
  _process_strong_tasks->set_n_threads(n_workers);
  heap->set_par_threads(n_workers);

  if (ShenandoahStringDedup::is_enabled()) {
    // Reset string-dedup claim marks so this cycle's workers can re-claim them.
    ShenandoahStringDedup::clear_claimed();
  }
}
  58 
  59 ShenandoahRootProcessor::~ShenandoahRootProcessor() {


 203     ShenandoahStringDedup::parallel_oops_do(weak_roots);
 204   }
 205 
 206   {
 207     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ObjectSynchronizerRoots, worker_id);
 208     if (ShenandoahFastSyncRoots && MonitorInUseLists) {
 209       ObjectSynchronizer::oops_do(strong_roots);
 210     } else {
 211       while(_om_iterator.parallel_oops_do(strong_roots));
 212     }
 213   }
 214   // All threads execute the following. A specific chunk of buckets
 215   // from the StringTable are the individual tasks.
 216   if (weak_roots != NULL) {
 217     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::StringTableRoots, worker_id);
 218     StringTable::possibly_parallel_oops_do(weak_roots);
 219   }
 220 }
 221 
// Set up per-cycle state for evacuating roots: the code-root collection-set
// iterator is initialized here, before workers start.
ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers, ShenandoahPhaseTimings::Phase phase) :
  _srs(heap, true),
  _phase(phase),
  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator())
{
  // Record worker count and open the timing window for this phase.
  heap->set_par_threads(n_workers);
  heap->phase_timings()->record_workers_start(_phase);
}
 230 
// Close the worker timing window opened in the constructor.
ShenandoahRootEvacuator::~ShenandoahRootEvacuator() {
  ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
}
 234 
 235 void ShenandoahRootEvacuator::process_evacuate_roots(OopClosure* oops,
 236                                                      CodeBlobClosure* blobs,
 237                                                      uint worker_id) {
 238   ShenandoahHeap* heap = ShenandoahHeap::heap();
 239 
 240   {
 241     // Evacuate the PLL here so that the SurrogateLockerThread doesn't
 242     // have to. SurrogateLockerThread can execute write barrier in VMOperation
 243     // prolog. If the SLT runs into OOM during that evacuation, the VMOperation
 244     // may deadlock. Doing this evacuation the first thing makes that critical
 245     // OOM less likely to happen.  It is a bit excessive to perform WB by all
 246     // threads, but this guarantees the very first evacuation would be the PLL.
 247     //
 248     // This pre-evac can still silently fail with OOME here, and PLL would not
 249     // get evacuated. This would mean next VMOperation would try to evac PLL in
 250     // SLT thread. We make additional effort to recover from that OOME in SLT,
 251     // see ShenandoahHeap::oom_during_evacuation(). It seems to be the lesser evil
 252     // to do there, because we cannot trigger Full GC right here, when we are
 253     // in another VMOperation.
 254 
 255     oop pll = java_lang_ref_Reference::pending_list_lock();
 256     oopDesc::bs()->write_barrier(pll);
 257   }
 258 
 259   ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
 260   {
 261     ResourceMark rm;
 262     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
 263     Threads::possibly_parallel_oops_do(oops, NULL, NULL);
 264   }
 265 
 266   if (blobs != NULL) {
 267     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 268     _coderoots_cset_iterator.possibly_parallel_blobs_do(blobs);






 269   }
 270 }
 271 
// Implementation of ParallelCLDRootIterator
// Reset CLD claim marks so workers can claim class-loader-data roots for this
// pass. Clearing claims is only safe while the world is stopped.
ParallelCLDRootIterator::ParallelCLDRootIterator() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
  ClassLoaderDataGraph::clear_claimed_marks();
}
 277 
// Apply the strong/weak CLD closures to the class-loader-data graph roots.
// Thin wrapper over ClassLoaderDataGraph::roots_cld_do.
void ParallelCLDRootIterator::root_cld_do(CLDClosure* strong, CLDClosure* weak) {
    ClassLoaderDataGraph::roots_cld_do(strong, weak);
}
   1 /*
   2  * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"

  33 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  34 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahStringDedup.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahTimingTracker.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "runtime/fprofiler.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "runtime/thread.hpp"
  41 #include "services/management.hpp"
  42 
// Set up per-cycle root-scanning state shared by all GC workers for the given
// timing phase: serial-subtask bookkeeping, the code-root iterator and the
// ObjectMonitor iterator are all initialized here, before workers start.
ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers,
                                                 ShenandoahPhaseTimings::Phase phase) :
  _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),
  _srs(heap, true),
  _phase(phase),
  _coderoots_all_iterator(ShenandoahCodeRoots::iterator()),
  _om_iterator(ObjectSynchronizer::parallel_iterator())
{
  // Start the worker timing window for this phase before any root work runs.
  heap->phase_timings()->record_workers_start(_phase);
  // Tell the subtask bookkeeping and the heap how many workers participate.
  _process_strong_tasks->set_n_threads(n_workers);
  heap->set_par_threads(n_workers);

  if (ShenandoahStringDedup::is_enabled()) {
    // Reset string-dedup claim marks so this cycle's workers can re-claim them.
    ShenandoahStringDedup::clear_claimed();
  }
}
  59 
  60 ShenandoahRootProcessor::~ShenandoahRootProcessor() {


 204     ShenandoahStringDedup::parallel_oops_do(weak_roots);
 205   }
 206 
 207   {
 208     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ObjectSynchronizerRoots, worker_id);
 209     if (ShenandoahFastSyncRoots && MonitorInUseLists) {
 210       ObjectSynchronizer::oops_do(strong_roots);
 211     } else {
 212       while(_om_iterator.parallel_oops_do(strong_roots));
 213     }
 214   }
 215   // All threads execute the following. A specific chunk of buckets
 216   // from the StringTable are the individual tasks.
 217   if (weak_roots != NULL) {
 218     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::StringTableRoots, worker_id);
 219     StringTable::possibly_parallel_oops_do(weak_roots);
 220   }
 221 }
 222 
// Set up per-cycle state for evacuating roots: serial-subtask bookkeeping
// (used to serialize JVMTI root processing) and the code-root collection-set
// iterator are initialized here, before workers start.
ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers, ShenandoahPhaseTimings::Phase phase) :
  _evacuation_tasks(new SubTasksDone(SHENANDOAH_EVAC_NumElements)),
  _srs(heap, true),
  _phase(phase),
  _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator())
{
  // Record worker count and open the timing window for this phase.
  heap->set_par_threads(n_workers);
  heap->phase_timings()->record_workers_start(_phase);
}
 232 
// Release the subtask bookkeeping allocated in the constructor and close the
// worker timing window.
ShenandoahRootEvacuator::~ShenandoahRootEvacuator() {
  delete _evacuation_tasks;
  ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
}
 237 
 238 void ShenandoahRootEvacuator::process_evacuate_roots(OopClosure* oops,
 239                                                      CodeBlobClosure* blobs,
 240                                                      uint worker_id) {
 241   ShenandoahHeap* heap = ShenandoahHeap::heap();
 242 
 243   {
 244     // Evacuate the PLL here so that the SurrogateLockerThread doesn't
 245     // have to. SurrogateLockerThread can execute write barrier in VMOperation
 246     // prolog. If the SLT runs into OOM during that evacuation, the VMOperation
 247     // may deadlock. Doing this evacuation the first thing makes that critical
 248     // OOM less likely to happen.  It is a bit excessive to perform WB by all
 249     // threads, but this guarantees the very first evacuation would be the PLL.
 250     //
 251     // This pre-evac can still silently fail with OOME here, and PLL would not
 252     // get evacuated. This would mean next VMOperation would try to evac PLL in
 253     // SLT thread. We make additional effort to recover from that OOME in SLT,
 254     // see ShenandoahHeap::oom_during_evacuation(). It seems to be the lesser evil
 255     // to do there, because we cannot trigger Full GC right here, when we are
 256     // in another VMOperation.
 257 
 258     oop pll = java_lang_ref_Reference::pending_list_lock();
 259     oopDesc::bs()->write_barrier(pll);
 260   }
 261 
 262   ShenandoahWorkerTimings* worker_times = ShenandoahHeap::heap()->phase_timings()->worker_times();
 263   {
 264     ResourceMark rm;
 265     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::ThreadRoots, worker_id);
 266     Threads::possibly_parallel_oops_do(oops, NULL, NULL);
 267   }
 268 
 269   if (blobs != NULL) {
 270     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 271     _coderoots_cset_iterator.possibly_parallel_blobs_do(blobs);
 272   }
 273 
 274   if (_evacuation_tasks->is_task_claimed(SHENANDOAH_EVAC_jvmti_oops_do)) {
 275     ShenandoahForwardedIsAliveClosure is_alive;
 276     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JVMTIRoots, worker_id);
 277     JvmtiExport::weak_oops_do(&is_alive, oops);
 278   }
 279 }
 280 
// Implementation of ParallelCLDRootIterator
// Reset CLD claim marks so workers can claim class-loader-data roots for this
// pass. Clearing claims is only safe while the world is stopped.
ParallelCLDRootIterator::ParallelCLDRootIterator() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
  ClassLoaderDataGraph::clear_claimed_marks();
}
 286 
// Apply the strong/weak CLD closures to the class-loader-data graph roots.
// Thin wrapper over ClassLoaderDataGraph::roots_cld_do.
void ParallelCLDRootIterator::root_cld_do(CLDClosure* strong, CLDClosure* weak) {
    ClassLoaderDataGraph::roots_cld_do(strong, weak);
}
< prev index next >