< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp

Print this page
rev 59534 : 8245961: Shenandoah: move some root marking to concurrent phase

@@ -172,11 +172,10 @@
       shenandoah_assert_rp_isalive_installed();
     } else {
       rp = NULL;
     }
 
-    _cm->concurrent_scan_code_roots(worker_id, rp);
     _cm->mark_loop(worker_id, _terminator, rp,
                    true, // cancellable
                    ShenandoahStringDedup::is_enabled()); // perform string dedup
   }
 };

@@ -213,10 +212,47 @@
       }
     }
   }
 };
 
+// Process concurrent roots at safepoints
+template <typename CLOSURE>
+class ShenandoahProcessConcurrentRootsTask : public AbstractGangTask {
+private:
+  ShenandoahConcurrentRootScanner<false /* concurrent */, false /* single_thread */> _itr;
+  ShenandoahConcurrentMark* const _cm;
+  ReferenceProcessor*             _rp;
+public:
+
+  ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
+                                       ShenandoahPhaseTimings::Phase phase);
+  void work(uint worker_id);
+};
+
+template <typename CLOSURE>
+ShenandoahProcessConcurrentRootsTask<CLOSURE>::ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
+                                                                                    ShenandoahPhaseTimings::Phase phase) :
+  AbstractGangTask("Shenandoah STW Concurrent Mark Task"),
+  _itr(phase),
+  _cm(cm),
+  _rp(NULL) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  if (heap->process_references()) {
+    _rp = heap->ref_processor();
+    shenandoah_assert_rp_isalive_installed();
+  }
+}
+
+template <typename CLOSURE>
+void ShenandoahProcessConcurrentRootsTask<CLOSURE>::work(uint worker_id) {
+  ShenandoahParallelWorkerSession worker_session(worker_id);
+  ShenandoahObjToScanQueue* q = _cm->task_queues()->queue(worker_id);
+  CLOSURE cl(q, _rp);
+  _itr.oops_do(&cl, worker_id);
+}
+
+
 class ShenandoahFinalMarkingTask : public AbstractGangTask {
 private:
   ShenandoahConcurrentMark* _cm;
   TaskTerminator*           _terminator;
   bool _dedup_string;

@@ -265,17 +301,10 @@
                                                           do_nmethods ? &blobsCl : NULL);
         Threads::threads_do(&tc);
       }
     }
 
-    if (heap->is_degenerated_gc_in_progress() || heap->is_full_gc_in_progress()) {
-      // Full GC does not execute concurrent cycle.
-      // Degenerated cycle may bypass concurrent cycle.
-      // So code roots might not be scanned, let's scan here.
-      _cm->concurrent_scan_code_roots(worker_id, rp);
-    }
-
     _cm->mark_loop(worker_id, _terminator, rp,
                    false, // not cancellable
                    _dedup_string);
 
     assert(_cm->task_queues()->is_empty(), "Should be empty");

@@ -306,12 +335,10 @@
     // No need to update references, which means the heap is stable.
     // Can save time not walking through forwarding pointers.
     ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
     workers->run_task(&mark_roots);
   }
-
-  clear_claim_codecache();
 }
 
 void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
   assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots ||

@@ -388,38 +415,49 @@
     task_queue->initialize();
     _task_queues->register_queue(i, task_queue);
   }
 }
 
-void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
-  if (_heap->unload_classes()) {
-    return;
-  }
-
-  if (claim_codecache()) {
-    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
-    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    // TODO: We can not honor StringDeduplication here, due to lock ranking
-    // inversion. So, we may miss some deduplication candidates.
-    if (_heap->has_forwarded_objects()) {
-      ShenandoahMarkResolveRefsClosure cl(q, rp);
-      CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
-      CodeCache::blobs_do(&blobs);
-    } else {
-      ShenandoahMarkRefsClosure cl(q, rp);
-      CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
-      CodeCache::blobs_do(&blobs);
-    }
-  }
+// Mark concurrent roots during concurrent phases
+class ShenandoahMarkConcurrentRootsTask : public AbstractGangTask {
+private:
+  SuspendibleThreadSetJoiner         _sts_joiner;
+  ShenandoahConcurrentRootScanner<true /* concurrent */, false /* single_thread */> _itr;
+  ShenandoahObjToScanQueueSet* const _queue_set;
+  ReferenceProcessor* const          _rp;
+
+public:
+  ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
+                                    ReferenceProcessor* rp,
+                                    ShenandoahPhaseTimings::Phase phase);
+  void work(uint worker_id);
+};
+
+ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
+                                                                     ReferenceProcessor* rp,
+                                                                     ShenandoahPhaseTimings::Phase phase) :
+  AbstractGangTask("Shenandoah Concurrent Mark Task"),
+  _itr(phase),
+  _queue_set(qs),
+  _rp(rp) {
+  assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected");
+}
+
+void ShenandoahMarkConcurrentRootsTask::work(uint worker_id) {
+  ShenandoahConcurrentWorkerSession worker_session(worker_id);
+  ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id);
+  ShenandoahMarkResolveRefsClosure cl(q, _rp);
+  _itr.oops_do(&cl, worker_id);
 }
 
 void ShenandoahConcurrentMark::mark_from_roots() {
   WorkGang* workers = _heap->workers();
   uint nworkers = workers->active_workers();
 
+  ReferenceProcessor* rp = NULL;
   if (_heap->process_references()) {
-    ReferenceProcessor* rp = _heap->ref_processor();
+    rp = _heap->ref_processor();
     rp->set_active_mt_degree(nworkers);
 
     // enable ("weak") refs discovery
     rp->enable_discovery(true /*verify_no_refs*/);
     rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());

@@ -430,10 +468,17 @@
   ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
 
   task_queues()->reserve(nworkers);
 
   {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_mark_roots);
+    // Use separate task to mark concurrent roots, since it may hold ClassLoaderData_lock and CodeCache_lock
+    ShenandoahMarkConcurrentRootsTask task(task_queues(), rp, ShenandoahPhaseTimings::conc_mark_roots);
+    workers->run_task(&task);
+  }
+
+  {
     TaskTerminator terminator(nworkers, task_queues());
     ShenandoahConcurrentMarkingTask task(this, &terminator);
     workers->run_task(&task);
   }
 

@@ -443,10 +488,36 @@
 void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 
   uint nworkers = _heap->workers()->active_workers();
 
+  {
+    shenandoah_assert_rp_isalive_not_installed();
+    ShenandoahIsAliveSelector is_alive;
+    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
+
+
+    // Full GC does not execute concurrent cycle.
+    // Degenerated cycle may bypass concurrent cycle.
+    // So concurrent roots might not be scanned; scan them here.
+    // Ideally, this should be piggybacked onto ShenandoahFinalMarkingTask, but it makes time tracking
+    // very hard. Given full GC and degenerated GC should be rare, let's use separate task.
+    if (_heap->is_degenerated_gc_in_progress() || _heap->is_full_gc_in_progress()) {
+      ShenandoahPhaseTimings::Phase phase = _heap->is_full_gc_in_progress() ?
+                                            ShenandoahPhaseTimings::full_gc_scan_conc_roots :
+                                            ShenandoahPhaseTimings::degen_gc_scan_conc_roots;
+      ShenandoahGCPhase gc_phase(phase);
+      if (_heap->has_forwarded_objects()) {
+        ShenandoahProcessConcurrentRootsTask<ShenandoahMarkResolveRefsClosure> task(this, phase);
+        _heap->workers()->run_task(&task);
+      } else {
+        ShenandoahProcessConcurrentRootsTask<ShenandoahMarkRefsClosure> task(this, phase);
+        _heap->workers()->run_task(&task);
+      }
+    }
+
+
   // Finally mark everything else we've got in our queues during the previous steps.
   // It does two different things for concurrent vs. mark-compact GC:
   // - For concurrent GC, it starts with empty task queues, drains the remaining
   //   SATB buffers, and then completes the marking closure.
   // - For mark-compact GC, it starts out with the task queues seeded by initial

@@ -456,21 +527,18 @@
     ShenandoahGCPhase phase(full_gc ?
                             ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                             ShenandoahPhaseTimings::finish_queues);
     task_queues()->reserve(nworkers);
 
-    shenandoah_assert_rp_isalive_not_installed();
-    ShenandoahIsAliveSelector is_alive;
-    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
-
     StrongRootsScope scope(nworkers);
     TaskTerminator terminator(nworkers, task_queues());
     ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
     _heap->workers()->run_task(&task);
   }
 
   assert(task_queues()->is_empty(), "Should be empty");
+  }
 
   // When we're done marking everything, we process weak references.
   if (_heap->process_references()) {
     weak_refs_work(full_gc);
   }

@@ -940,13 +1008,5 @@
       ShenandoahTerminatorTerminator tt(heap);
       if (terminator->offer_termination(&tt)) return;
     }
   }
 }
-
-bool ShenandoahConcurrentMark::claim_codecache() {
-  return _claimed_codecache.try_set();
-}
-
-void ShenandoahConcurrentMark::clear_claim_codecache() {
-  _claimed_codecache.unset();
-}
< prev index next >