< prev index next >
src/hotspot/share/gc/shared/taskTerminator.cpp
Print this page
rev 59861 : [mq]: 8245721-refactor-taskterminator
*** 1,7 ****
/*
! * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
--- 1,7 ----
/*
! * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
*** 23,35 ****
--- 23,45 ----
*
*/
#include "precompiled.hpp"
+ #include "gc/shared/gc_globals.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.hpp"
#include "logging/log.hpp"
+ #include "runtime/mutex.hpp"
+ #include "runtime/mutexLocker.hpp"
+ #include "runtime/thread.hpp"
+
// Reset the per-thread spin state so the next spin-master episode starts
// with fresh yield/hard-spin counters and the initial spin-loop length.
// Called once per outer wait iteration in offer_termination().
// NOTE(review): the pre-refactoring spin master initialized its limit to
// WorkStealingHardSpins >> WorkStealingSpinToYieldRatio (see the removed
// do_spin_master_work) and doubled it per turn; starting at the full
// WorkStealingHardSpins here skips that gradual ramp-up, since
// do_spin_iteration's MIN2(2 * limit, WorkStealingHardSpins) is already
// saturated — confirm this behavior change is intended.
void TaskTerminator::SpinContext::reset() {
  yield_count = 0;
  hard_spin_count = 0;
  hard_spin_limit = WorkStealingHardSpins;
}
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
_n_threads(n_threads),
_queue_set(queue_set),
_offered_termination(0),
*** 61,71 ****
}
void TaskTerminator::reset_for_reuse() {
if (_offered_termination != 0) {
assert(_offered_termination == _n_threads,
! "Terminator may still be in use");
_offered_termination = 0;
}
}
void TaskTerminator::reset_for_reuse(uint n_threads) {
--- 71,82 ----
}
// Make the terminator reusable for a new round of termination with the same
// number of threads. Only legal after the previous round fully completed,
// i.e. every thread offered termination and the last spin master retired;
// both conditions are checked by the asserts below.
void TaskTerminator::reset_for_reuse() {
  if (_offered_termination != 0) {
    assert(_offered_termination == _n_threads,
           "Only %u of %u threads offered termination", _offered_termination, _n_threads);
    assert(_spin_master == NULL, "Leftover spin master " PTR_FORMAT, p2i(_spin_master));
    _offered_termination = 0;
  }
}
void TaskTerminator::reset_for_reuse(uint n_threads) {
*** 79,88 ****
--- 90,145 ----
// Total number of tasks currently pending across the associated queue set.
size_t TaskTerminator::tasks_in_queue_set() const {
  const size_t pending_tasks = _queue_set->tasks();
  return pending_tasks;
}
// Prepare the calling thread to return from offer_termination(): retire it
// as spin master if it currently holds that role, and wake up enough threads
// blocked on _blocker to consume the 'tasks' work units observed in the
// queue set. Must be called with _blocker held by this_thread.
// NOTE(review): other code in this file calls this with a single argument,
// so the header presumably declares a default value for 'tasks' — confirm.
void TaskTerminator::prepare_for_return(Thread* this_thread, size_t tasks) {
  assert(_blocker->is_locked(), "must be");
  assert(_blocker->owned_by_self(), "must be");
  assert(_offered_termination >= 1, "must be");

  // Give up the spin-master role before leaving.
  if (_spin_master == this_thread) {
    _spin_master = NULL;
  }

  // At most _offered_termination - 1 peers can be waiting (the count
  // includes this thread). If there is at least one task for each of them,
  // wake them all; otherwise wake tasks - 1 of them — presumably the
  // calling thread itself accounts for the remaining task.
  if (tasks >= _offered_termination - 1) {
    _blocker->notify_all();
  } else {
    for (; tasks > 1; tasks--) {
      _blocker->notify();
    }
  }
}
+
// Perform a single spin/yield step of the spin master's busy-wait, updating
// the counters in spin_context. Must be called WITHOUT holding _blocker.
// Returns true when the yield budget (WorkStealingYieldsBeforeSleep) is
// exhausted and the caller should give up spinning and sleep instead;
// returns false to indicate the caller may keep spinning.
bool TaskTerminator::do_spin_iteration(SpinContext& spin_context) {
  assert(!_blocker->owned_by_self(), "should not be owned by self");

  if (spin_context.yield_count <= WorkStealingYieldsBeforeSleep) {
    spin_context.yield_count++;
    // Periodically call yield() instead of spinning:
    // after WorkStealingSpinToYieldRatio spins, do a yield() call
    // and reset the counts and starting limit.
    if (spin_context.hard_spin_count > WorkStealingSpinToYieldRatio) {
      os::naked_yield();
      spin_context.hard_spin_count = 0;
      spin_context.hard_spin_limit = WorkStealingHardSpins;
    } else {
      // Hard spin this time.
      // Increase the hard spinning period but only up to a limit.
      spin_context.hard_spin_limit = MIN2(2 * spin_context.hard_spin_limit,
                                          (uint) WorkStealingHardSpins);
      for (uint j = 0; j < spin_context.hard_spin_limit; j++) {
        SpinPause();
      }
      spin_context.hard_spin_count++;
    }
    return false;
  } else {
    return true;
  }
}
+
bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
assert(_n_threads > 0, "Initialization is incorrect");
assert(_offered_termination < _n_threads, "Invariant");
assert(_blocker != NULL, "Invariant");
*** 91,230 ****
_offered_termination = 1;
assert_queue_set_empty();
return true;
}
! _blocker->lock_without_safepoint_check();
_offered_termination++;
! // All arrived, done
if (_offered_termination == _n_threads) {
! _blocker->notify_all();
! _blocker->unlock();
assert_queue_set_empty();
return true;
}
! Thread* the_thread = Thread::current();
! while (true) {
if (_spin_master == NULL) {
_spin_master = the_thread;
! _blocker->unlock();
! if (do_spin_master_work(terminator)) {
! assert(_offered_termination == _n_threads, "termination condition");
! assert_queue_set_empty();
! return true;
! } else {
! _blocker->lock_without_safepoint_check();
! // There is possibility that termination is reached between dropping the lock
! // before returning from do_spin_master_work() and acquiring lock above.
! if (_offered_termination == _n_threads) {
! _blocker->unlock();
! assert_queue_set_empty();
! return true;
! }
}
- } else {
- _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);
if (_offered_termination == _n_threads) {
! _blocker->unlock();
assert_queue_set_empty();
return true;
! }
! }
!
! size_t tasks = tasks_in_queue_set();
! if (exit_termination(tasks, terminator)) {
! assert_lock_strong(_blocker);
_offered_termination--;
- _blocker->unlock();
return false;
}
! }
! }
!
! bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
! uint yield_count = 0;
! // Number of hard spin loops done since last yield
! uint hard_spin_count = 0;
! // Number of iterations in the hard spin loop.
! uint hard_spin_limit = WorkStealingHardSpins;
!
! // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
! // If it is greater than 0, then start with a small number
! // of spins and increase number with each turn at spinning until
! // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
! // Then do a yield() call and start spinning afresh.
! if (WorkStealingSpinToYieldRatio > 0) {
! hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
! hard_spin_limit = MAX2(hard_spin_limit, 1U);
! }
! // Remember the initial spin limit.
! uint hard_spin_start = hard_spin_limit;
!
! // Loop waiting for all threads to offer termination or
! // more work.
! while (true) {
! // Look for more work.
! // Periodically sleep() instead of yield() to give threads
! // waiting on the cores the chance to grab this code
! if (yield_count <= WorkStealingYieldsBeforeSleep) {
! // Do a yield or hardspin. For purposes of deciding whether
! // to sleep, count this as a yield.
! yield_count++;
!
! // Periodically call yield() instead spinning
! // After WorkStealingSpinToYieldRatio spins, do a yield() call
! // and reset the counts and starting limit.
! if (hard_spin_count > WorkStealingSpinToYieldRatio) {
! yield();
! hard_spin_count = 0;
! hard_spin_limit = hard_spin_start;
! } else {
! // Hard spin this time
! // Increase the hard spinning period but only up to a limit.
! hard_spin_limit = MIN2(2*hard_spin_limit,
! (uint) WorkStealingHardSpins);
! for (uint j = 0; j < hard_spin_limit; j++) {
! SpinPause();
! }
! hard_spin_count++;
! }
! } else {
! log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
! p2i(Thread::current()), yield_count);
! yield_count = 0;
!
! MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
_spin_master = NULL;
- locker.wait(WorkStealingSleepMillis);
- if (_spin_master == NULL) {
- _spin_master = Thread::current();
- } else {
- return false;
- }
}
! size_t tasks = tasks_in_queue_set();
! bool exit = exit_termination(tasks, terminator);
! {
! MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
! // Termination condition reached
if (_offered_termination == _n_threads) {
! _spin_master = NULL;
return true;
- } else if (exit) {
- if (tasks >= _offered_termination - 1) {
- locker.notify_all();
} else {
! for (; tasks > 1; tasks--) {
! locker.notify();
! }
! }
! _spin_master = NULL;
return false;
}
}
}
}
--- 148,214 ----
_offered_termination = 1;
assert_queue_set_empty();
return true;
}
! Thread* the_thread = Thread::current();
! SpinContext spin_context;
!
! MutexLocker x(_blocker, Mutex::_no_safepoint_check_flag);
_offered_termination++;
!
if (_offered_termination == _n_threads) {
! prepare_for_return(the_thread);
assert_queue_set_empty();
return true;
}
! for (;;) {
if (_spin_master == NULL) {
_spin_master = the_thread;
! bool giveup_spin;
! do {
! size_t tasks;
! bool should_exit_termination;
! {
! MutexUnlocker y(_blocker, Mutex::_no_safepoint_check_flag);
! giveup_spin = do_spin_iteration(spin_context);
! // Dirty read of exit condition.
! tasks = tasks_in_queue_set();
! should_exit_termination = exit_termination(tasks, terminator);
}
+ // Immediately check exit conditions after re-acquiring the lock using the
+ // information gathered just recently.
if (_offered_termination == _n_threads) {
! prepare_for_return(the_thread);
assert_queue_set_empty();
return true;
! } else if (should_exit_termination) {
! prepare_for_return(the_thread, tasks);
_offered_termination--;
return false;
}
! } while (!giveup_spin);
! // Give up spin master before sleeping.
_spin_master = NULL;
}
+ _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);
! // Immediately check exit conditions after re-acquiring the lock.
if (_offered_termination == _n_threads) {
! prepare_for_return(the_thread);
! assert_queue_set_empty();
return true;
} else {
! size_t tasks = tasks_in_queue_set();
! if (exit_termination(tasks, terminator)) {
! prepare_for_return(the_thread, tasks);
! _offered_termination--;
return false;
}
}
+ spin_context.reset();
}
}
< prev index next >