
src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.cpp

rev 10690 : [backport] Cleanup header files and forward declarations
rev 10724 : [backport] Add JFR parallel and concurrent events (infrastructure)
rev 10752 : [backport] Pre-evac JVMTI roots + fixes
rev 10772 : [backport] Update copyrights

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2015, 2017, Red Hat, Inc. and/or its affiliates.
+ * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
  *

@@ -21,24 +21,25 @@
  *
  */
 
 #include "precompiled.hpp"
 
-#include "classfile/javaClasses.hpp"
+#include "classfile/classLoaderData.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
 #include "gc_implementation/shenandoah/shenandoahStringDedup.hpp"
+#include "gc_implementation/shenandoah/shenandoahTimingTracker.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/fprofiler.hpp"
-#include "runtime/mutex.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/thread.hpp"
 #include "services/management.hpp"
 
 ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahHeap* heap, uint n_workers,
                                                  ShenandoahPhaseTimings::Phase phase) :
   _process_strong_tasks(new SubTasksDone(SHENANDOAH_RP_PS_NumElements)),

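The include changes above follow the header-cleanup theme of rev 10690: the translation unit now includes only the headers whose full definitions it needs (classLoaderData.hpp, resourceArea.hpp, thread.hpp, the new timing tracker) and drops the ones it does not (javaClasses.hpp, shenandoahBarrierSet.hpp, mutex.hpp). A minimal sketch of the companion forward-declaration idiom, with made-up class and file names rather than the real Shenandoah headers:

  // shenandoahFoo.hpp (hypothetical): only pointers/references to
  // ShenandoahBar appear in this header, so a forward declaration is enough
  // and the heavier shenandoahBar.hpp include can move into the .cpp file.
  class ShenandoahBar;                 // forward declaration instead of #include

  class ShenandoahFoo {
  private:
    ShenandoahBar* _bar;               // pointer member: full type not required
  public:
    explicit ShenandoahFoo(ShenandoahBar* bar) : _bar(bar) {}
    void process();                    // defined in shenandoahFoo.cpp, which
                                       // #includes shenandoahBar.hpp for real
  };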
@@ -218,19 +219,21 @@
     StringTable::possibly_parallel_oops_do(weak_roots);
   }
 }
 
 ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahHeap* heap, uint n_workers, ShenandoahPhaseTimings::Phase phase) :
+  _evacuation_tasks(new SubTasksDone(SHENANDOAH_EVAC_NumElements)),
   _srs(heap, true),
   _phase(phase),
   _coderoots_cset_iterator(ShenandoahCodeRoots::cset_iterator())
 {
   heap->set_par_threads(n_workers);
   heap->phase_timings()->record_workers_start(_phase);
 }
 
 ShenandoahRootEvacuator::~ShenandoahRootEvacuator() {
+  delete _evacuation_tasks;
   ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
 }
 
 void ShenandoahRootEvacuator::process_evacuate_roots(OopClosure* oops,
                                                      CodeBlobClosure* blobs,

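The new _evacuation_tasks member gives the root evacuator its own SubTasksDone bookkeeping, so a serial root group such as the JVMTI oops walk added further down is performed by only one of the n_workers threads, while the parallel groups still run on every worker. A minimal self-contained sketch of that claim-once pattern follows; ClaimTable and try_claim are invented stand-ins for illustration (the real SubTasksDone API and its return-value convention are defined elsewhere in HotSpot), and the printf stands in for the actual root walk.

  #include <atomic>
  #include <cstdio>
  #include <thread>
  #include <vector>

  enum EvacTask { EVAC_jvmti_oops_do = 0, EVAC_NumElements };

  // Simplified stand-in for SubTasksDone: each task index can be claimed once.
  class ClaimTable {
    std::atomic<bool> _claimed[EVAC_NumElements];
  public:
    ClaimTable() { for (std::atomic<bool>& c : _claimed) c.store(false); }
    // Returns true only for the single thread that wins the claim on task t.
    bool try_claim(EvacTask t) { return !_claimed[t].exchange(true); }
  };

  int main() {
    ClaimTable tasks;
    std::vector<std::thread> workers;
    for (int worker_id = 0; worker_id < 4; worker_id++) {
      workers.emplace_back([&tasks, worker_id]() {
        // Parallel root groups (thread roots, code roots) would run here on every worker.
        if (tasks.try_claim(EVAC_jvmti_oops_do)) {
          // Exactly one worker processes the serial JVMTI root group.
          std::printf("worker %d processes JVMTI oops\n", worker_id);
        }
      });
    }
    for (std::thread& w : workers) w.join();
    return 0;
  }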
@@ -265,10 +268,16 @@
 
   if (blobs != NULL) {
     ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
     _coderoots_cset_iterator.possibly_parallel_blobs_do(blobs);
   }
+
+  if (_evacuation_tasks->is_task_claimed(SHENANDOAH_EVAC_jvmti_oops_do)) {
+    ShenandoahForwardedIsAliveClosure is_alive;
+    ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::JVMTIRoots, worker_id);
+    JvmtiExport::weak_oops_do(&is_alive, oops);
+  }
 }
 
 // Implementation of ParallelCLDRootIterator
 ParallelCLDRootIterator::ParallelCLDRootIterator() {
   assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint");
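ShenandoahWorkerTimingsTracker, pulled in through shenandoahTimingTracker.hpp above, is used here as a scoped timer: whatever runs inside the enclosing block is charged to a (phase, worker_id) slot of the worker timing table, which is how the new JVMTIRoots entry gets attributed to the worker that claimed it. A simplified, self-contained model of that scoped-timer pattern (all names below are illustrative, not the real Shenandoah types):

  #include <chrono>
  #include <cstdio>

  enum Phase { ThreadRoots = 0, CodeCacheRoots, JVMTIRoots, NumPhases };

  // Toy per-phase, per-worker timing table.
  struct WorkerTimings {
    static const int MaxWorkers = 8;
    double seconds[NumPhases][MaxWorkers] = {};
    void record(Phase p, int worker_id, double s) { seconds[p][worker_id] += s; }
  };

  // Scoped timer: the constructor notes the start time and the destructor
  // charges the elapsed time to the given (phase, worker) slot on block exit.
  class ScopedWorkerTimer {
    WorkerTimings& _timings;
    Phase _phase;
    int _worker_id;
    std::chrono::steady_clock::time_point _start;
  public:
    ScopedWorkerTimer(WorkerTimings& t, Phase p, int worker_id)
      : _timings(t), _phase(p), _worker_id(worker_id),
        _start(std::chrono::steady_clock::now()) {}
    ~ScopedWorkerTimer() {
      std::chrono::duration<double> d = std::chrono::steady_clock::now() - _start;
      _timings.record(_phase, _worker_id, d.count());
    }
  };

  int main() {
    WorkerTimings timings;
    {
      // Mirrors the shape of the diff: time only the JVMTI root walk for worker 0.
      ScopedWorkerTimer timer(timings, JVMTIRoots, /* worker_id */ 0);
      // ... root processing for this group would happen here ...
    }
    std::printf("JVMTIRoots, worker 0: %.6f s\n", timings.seconds[JVMTIRoots][0]);
    return 0;
  }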