< prev index next >
src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
Print this page
rev 59534 : 8245961: Shenandoah: move some root marking to concurrent phase
*** 172,182 ****
shenandoah_assert_rp_isalive_installed();
} else {
rp = NULL;
}
- _cm->concurrent_scan_code_roots(worker_id, rp);
_cm->mark_loop(worker_id, _terminator, rp,
true, // cancellable
ShenandoahStringDedup::is_enabled()); // perform string dedup
}
};
--- 172,181 ----
*** 213,222 ****
--- 212,318 ----
}
}
}
};
+ // Iterator over roots that can be scanned outside a full STW root scan:
+ // VM roots, class-loader-data roots and (unless classes are being
+ // unloaded) a snapshot of the code cache. CONCURRENT selects the
+ // locking/claiming behavior; SINGLE_THREADED is forwarded to the CLD
+ // root iterator.
+ template <bool CONCURRENT, bool SINGLE_THREADED>
+ class ShenandoahConcurrentRootsIterator {
+ private:
+ ShenandoahVMRoots<CONCURRENT> _vm_roots;
+ ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>
+ _cld_roots;
+ // Stable view of the nmethod table, taken in the constructor; NULL when
+ // classes are being unloaded (code cache is then scanned elsewhere).
+ ShenandoahNMethodTableSnapshot* _codecache_snapshot;
+ // Timing phase used to attribute worker time in oops_do().
+ ShenandoahPhaseTimings::Phase _phase;
+
+ public:
+ ShenandoahConcurrentRootsIterator(ShenandoahPhaseTimings::Phase phase);
+ ~ShenandoahConcurrentRootsIterator();
+
+ // Applies |oops| to all roots covered by this iterator; safe to call
+ // from multiple workers in parallel.
+ void oops_do(OopClosure* oops, uint worker_id);
+ };
+
+ template <bool CONCURRENT, bool SINGLE_THREADED>
+ ShenandoahConcurrentRootsIterator<CONCURRENT, SINGLE_THREADED>::ShenandoahConcurrentRootsIterator(ShenandoahPhaseTimings::Phase phase) :
+ _vm_roots(phase),
+ _cld_roots(phase),
+ _codecache_snapshot(NULL),
+ _phase(phase) {
+ if (!ShenandoahHeap::heap()->unload_classes()) {
+ if (CONCURRENT) {
+ // Concurrent case: hold CodeCache_lock for the whole lifetime of this
+ // iterator (released in the destructor) so the snapshot taken below
+ // stays valid while workers iterate it.
+ CodeCache_lock->lock_without_safepoint_check();
+ } else {
+ // STW case: the safepoint itself keeps the code cache stable, no
+ // lock needed.
+ assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+ }
+ _codecache_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
+ }
+ // Concurrent use of this iterator does not resolve forwardees, so it is
+ // only valid when the heap has no forwarded objects.
+ assert(!CONCURRENT || !ShenandoahHeap::heap()->has_forwarded_objects(), "Not expecting forwarded pointers during concurrent marking");
+ }
+
+ // Releases the code cache snapshot and, in the concurrent case, the
+ // CodeCache_lock acquired by the constructor.
+ template <bool CONCURRENT, bool SINGLE_THREADED>
+ ShenandoahConcurrentRootsIterator<CONCURRENT, SINGLE_THREADED>::~ShenandoahConcurrentRootsIterator() {
+ if (!ShenandoahHeap::heap()->unload_classes()) {
+ ShenandoahCodeRoots::table()->finish_iteration(_codecache_snapshot);
+ if (CONCURRENT) {
+ CodeCache_lock->unlock();
+ }
+ }
+ }
+
+ template <bool CONCURRENT, bool SINGLE_THREADED>
+ void ShenandoahConcurrentRootsIterator<CONCURRENT, SINGLE_THREADED>::oops_do(OopClosure* oops, uint worker_id) {
+ ShenandoahHeap* const heap = ShenandoahHeap::heap();
+ // NOTE(review): CLDs are claimed strongly only in the concurrent case;
+ // the STW path uses _claim_none — presumably because the safepoint task
+ // runs this iterator exactly once per cycle. Confirm against callers.
+ CLDToOopClosure clds_cl(oops, CONCURRENT ? ClassLoaderData::_claim_strong : ClassLoaderData::_claim_none);
+ _vm_roots.oops_do(oops, worker_id);
+
+ if (!heap->unload_classes()) {
+ // Not unloading classes: scan all CLDs and the code cache roots from
+ // the snapshot taken in the constructor.
+ _cld_roots.cld_do(&clds_cl, worker_id);
+
+ ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
+ CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations);
+ _codecache_snapshot->parallel_blobs_do(&blobs);
+ } else {
+ // Unloading classes: only the always-strong CLDs are scanned here.
+ _cld_roots.always_strong_cld_do(&clds_cl, worker_id);
+ }
+ }
+
+ // Process concurrent roots at safepoints
+ // (used below by degenerated/full GC, which may have bypassed the
+ // concurrent root scan). CLOSURE is the marking closure type each worker
+ // instantiates in work().
+ template <typename CLOSURE>
+ class ShenandoahProcessConcurrentRootsTask : public AbstractGangTask {
+ private:
+ // Safepoint (non-concurrent), multi-threaded iteration.
+ ShenandoahConcurrentRootsIterator<false /* concurrent */, false /* single_thread */> _itr;
+ ShenandoahConcurrentMark* const _cm;
+ // NULL unless reference processing is enabled (see constructor).
+ ReferenceProcessor* _rp;
+ public:
+
+ ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
+ ShenandoahPhaseTimings::Phase phase);
+ void work(uint worker_id);
+ };
+
+ template <typename CLOSURE>
+ ShenandoahProcessConcurrentRootsTask<CLOSURE>::ShenandoahProcessConcurrentRootsTask(ShenandoahConcurrentMark* cm,
+ ShenandoahPhaseTimings::Phase phase) :
+ AbstractGangTask("Shenandoah STW Concurrent Mark Task"),
+ _itr(phase),
+ _cm(cm),
+ _rp(NULL) {
+ ShenandoahHeap* heap = ShenandoahHeap::heap();
+ // Cache the reference processor only when reference processing is on;
+ // the caller must already have installed the is_alive closure.
+ if (heap->process_references()) {
+ _rp = heap->ref_processor();
+ shenandoah_assert_rp_isalive_installed();
+ }
+ }
+
+ // Per-worker body: mark through all "concurrent" roots into this worker's
+ // scan queue using the configured closure type.
+ template <typename CLOSURE>
+ void ShenandoahProcessConcurrentRootsTask<CLOSURE>::work(uint worker_id) {
+ ShenandoahParallelWorkerSession worker_session(worker_id);
+ ShenandoahObjToScanQueue* q = _cm->task_queues()->queue(worker_id);
+ CLOSURE cl(q, _rp);
+ _itr.oops_do(&cl, worker_id);
+ }
+
+
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
ShenandoahConcurrentMark* _cm;
TaskTerminator* _terminator;
bool _dedup_string;
*** 265,281 ****
do_nmethods ? &blobsCl : NULL);
Threads::threads_do(&tc);
}
}
- if (heap->is_degenerated_gc_in_progress() || heap->is_full_gc_in_progress()) {
- // Full GC does not execute concurrent cycle.
- // Degenerated cycle may bypass concurrent cycle.
- // So code roots might not be scanned, let's scan here.
- _cm->concurrent_scan_code_roots(worker_id, rp);
- }
-
_cm->mark_loop(worker_id, _terminator, rp,
false, // not cancellable
_dedup_string);
assert(_cm->task_queues()->is_empty(), "Should be empty");
--- 361,370 ----
*** 306,317 ****
// No need to update references, which means the heap is stable.
// Can save time not walking through forwarding pointers.
ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
workers->run_task(&mark_roots);
}
-
- clear_claim_codecache();
}
void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots ||
--- 395,404 ----
*** 388,425 ****
task_queue->initialize();
_task_queues->register_queue(i, task_queue);
}
}
! void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
! if (_heap->unload_classes()) {
! return;
! }
!
! if (claim_codecache()) {
! ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
! MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
! // TODO: We can not honor StringDeduplication here, due to lock ranking
! // inversion. So, we may miss some deduplication candidates.
! if (_heap->has_forwarded_objects()) {
! ShenandoahMarkResolveRefsClosure cl(q, rp);
! CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
! CodeCache::blobs_do(&blobs);
! } else {
! ShenandoahMarkRefsClosure cl(q, rp);
! CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
! CodeCache::blobs_do(&blobs);
! }
! }
}
void ShenandoahConcurrentMark::mark_from_roots() {
WorkGang* workers = _heap->workers();
uint nworkers = workers->active_workers();
if (_heap->process_references()) {
! ReferenceProcessor* rp = _heap->ref_processor();
rp->set_active_mt_degree(nworkers);
// enable ("weak") refs discovery
rp->enable_discovery(true /*verify_no_refs*/);
rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
--- 475,523 ----
task_queue->initialize();
_task_queues->register_queue(i, task_queue);
}
}
! // Mark concurrent roots during concurrent phases
! // (replaces the old claim-based concurrent_scan_code_roots scheme).
! class ShenandoahMarkConcurrentRootsTask : public AbstractGangTask {
! private:
! // Joins the suspendible thread set for the task's lifetime — presumably
! // so workers can yield to safepoints; confirm against STS usage rules.
! SuspendibleThreadSetJoiner _sts_joiner;
! // Concurrent, multi-threaded root iteration.
! ShenandoahConcurrentRootsIterator<true /* concurrent */, false /* single-threaded */> _itr;
! ShenandoahObjToScanQueueSet* const _queue_set;
! ReferenceProcessor* const _rp;
!
! public:
! ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
! ReferenceProcessor* rp,
! ShenandoahPhaseTimings::Phase phase);
! void work(uint worker_id);
! };
!
! ShenandoahMarkConcurrentRootsTask::ShenandoahMarkConcurrentRootsTask(ShenandoahObjToScanQueueSet* qs,
! ReferenceProcessor* rp,
! ShenandoahPhaseTimings::Phase phase) :
! AbstractGangTask("Shenandoah Concurrent Mark Task"),
! _itr(phase),
! _queue_set(qs),
! _rp(rp) {
! // Concurrent root marking must run before anything is evacuated.
! assert(!ShenandoahHeap::heap()->has_forwarded_objects(), "Not expected");
! }
!
! // Per-worker body: mark through the concurrent roots into this worker's
! // scan queue.
! void ShenandoahMarkConcurrentRootsTask::work(uint worker_id) {
! ShenandoahConcurrentWorkerSession worker_session(worker_id);
! ShenandoahObjToScanQueue* q = _queue_set->queue(worker_id);
! ShenandoahMarkResolveRefsClosure cl(q, _rp);
! _itr.oops_do(&cl, worker_id);
}
void ShenandoahConcurrentMark::mark_from_roots() {
WorkGang* workers = _heap->workers();
uint nworkers = workers->active_workers();
+ ReferenceProcessor* rp = NULL;
if (_heap->process_references()) {
! rp = _heap->ref_processor();
rp->set_active_mt_degree(nworkers);
// enable ("weak") refs discovery
rp->enable_discovery(true /*verify_no_refs*/);
rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
*** 430,439 ****
--- 528,544 ----
ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
task_queues()->reserve(nworkers);
{
+ ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_mark_roots);
+ // Use separate task to mark concurrent roots, since it may hold ClassLoaderData_lock and CodeCache_lock
+ ShenandoahMarkConcurrentRootsTask task(task_queues(), rp, ShenandoahPhaseTimings::conc_mark_roots);
+ workers->run_task(&task);
+ }
+
+ {
TaskTerminator terminator(nworkers, task_queues());
ShenandoahConcurrentMarkingTask task(this, &terminator);
workers->run_task(&task);
}
*** 443,452 ****
--- 548,583 ----
void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
uint nworkers = _heap->workers()->active_workers();
+ {
+ shenandoah_assert_rp_isalive_not_installed();
+ ShenandoahIsAliveSelector is_alive;
+ ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
+
+
+ // Full GC does not execute concurrent cycle.
+ // Degenerated cycle may bypass concurrent cycle.
+ // So concurrent roots might not be scanned, scan them here.
+ // Ideally, this should be piggyback to ShenandoahFinalMarkingTask, but it makes time tracking
+ // very hard. Given full GC and degenerated GC should be rare, let's use separate task.
+ if (_heap->is_degenerated_gc_in_progress() || _heap->is_full_gc_in_progress()) {
+ ShenandoahPhaseTimings::Phase phase = _heap->is_full_gc_in_progress() ?
+ ShenandoahPhaseTimings::full_gc_scan_conc_roots :
+ ShenandoahPhaseTimings::degen_gc_scan_conc_roots;
+ ShenandoahGCPhase gc_phase(phase);
+ if (_heap->has_forwarded_objects()) {
+ ShenandoahProcessConcurrentRootsTask<ShenandoahMarkResolveRefsClosure> task(this, phase);
+ _heap->workers()->run_task(&task);
+ } else {
+ ShenandoahProcessConcurrentRootsTask<ShenandoahMarkRefsClosure> task(this, phase);
+ _heap->workers()->run_task(&task);
+ }
+ }
+
+
// Finally mark everything else we've got in our queues during the previous steps.
// It does two different things for concurrent vs. mark-compact GC:
// - For concurrent GC, it starts with empty task queues, drains the remaining
// SATB buffers, and then completes the marking closure.
// - For mark-compact GC, it starts out with the task queues seeded by initial
*** 456,476 ****
ShenandoahGCPhase phase(full_gc ?
ShenandoahPhaseTimings::full_gc_mark_finish_queues :
ShenandoahPhaseTimings::finish_queues);
task_queues()->reserve(nworkers);
- shenandoah_assert_rp_isalive_not_installed();
- ShenandoahIsAliveSelector is_alive;
- ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
-
StrongRootsScope scope(nworkers);
TaskTerminator terminator(nworkers, task_queues());
ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
_heap->workers()->run_task(&task);
}
assert(task_queues()->is_empty(), "Should be empty");
// When we're done marking everything, we process weak references.
if (_heap->process_references()) {
weak_refs_work(full_gc);
}
--- 587,604 ----
ShenandoahGCPhase phase(full_gc ?
ShenandoahPhaseTimings::full_gc_mark_finish_queues :
ShenandoahPhaseTimings::finish_queues);
task_queues()->reserve(nworkers);
StrongRootsScope scope(nworkers);
TaskTerminator terminator(nworkers, task_queues());
ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
_heap->workers()->run_task(&task);
}
assert(task_queues()->is_empty(), "Should be empty");
+ }
// When we're done marking everything, we process weak references.
if (_heap->process_references()) {
weak_refs_work(full_gc);
}
*** 940,952 ****
ShenandoahTerminatorTerminator tt(heap);
if (terminator->offer_termination(&tt)) return;
}
}
}
-
- bool ShenandoahConcurrentMark::claim_codecache() {
- return _claimed_codecache.try_set();
- }
-
- void ShenandoahConcurrentMark::clear_claim_codecache() {
- _claimed_codecache.unset();
- }
--- 1068,1072 ----
< prev index next >