
src/hotspot/share/gc/shared/taskTerminator.cpp

rev 60497 : [mq]: 8245721-kbarrett-review2

*** 26,71 ****
  #include "precompiled.hpp"
  #include "gc/shared/taskTerminator.hpp"
  #include "gc/shared/taskqueue.hpp"
  #include "logging/log.hpp"
  
  TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
    _n_threads(n_threads),
    _queue_set(queue_set),
    _offered_termination(0),
!   _spin_master(NULL) {
! 
!   _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
! }
  
  TaskTerminator::~TaskTerminator() {
    if (_offered_termination != 0) {
      assert(_offered_termination == _n_threads, "Must be terminated or aborted");
      assert_queue_set_empty();
    }
  
    assert(_spin_master == NULL, "Should have been reset");
-   assert(_blocker != NULL, "Can not be NULL");
-   delete _blocker;
  }
  
  #ifdef ASSERT
  void TaskTerminator::assert_queue_set_empty() const {
    _queue_set->assert_empty();
  }
  #endif
  
- void TaskTerminator::yield() {
-   assert(_offered_termination <= _n_threads, "Invariant");
-   os::naked_yield();
- }
- 
  void TaskTerminator::reset_for_reuse() {
    if (_offered_termination != 0) {
      assert(_offered_termination == _n_threads,
!            "Terminator may still be in use");
      _offered_termination = 0;
    }
  }
  
  void TaskTerminator::reset_for_reuse(uint n_threads) {
--- 26,102 ----
  #include "precompiled.hpp"
  #include "gc/shared/taskTerminator.hpp"
  #include "gc/shared/taskqueue.hpp"
  #include "logging/log.hpp"
+ #include "runtime/globals.hpp"
+ #include "runtime/mutexLocker.hpp"
+ #include "runtime/thread.hpp"
+ 
+ TaskTerminator::DelayContext::DelayContext() {
+   _yield_count = 0;
+   reset_hard_spin_information();
+ }
+ 
+ void TaskTerminator::DelayContext::reset_hard_spin_information() {
+   _hard_spin_count = 0;
+   _hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
+ }
+ 
+ bool TaskTerminator::DelayContext::needs_sleep() const {
+   return _yield_count >= WorkStealingYieldsBeforeSleep;
+ }
+ 
+ void TaskTerminator::DelayContext::do_step() {
+   assert(_yield_count < WorkStealingYieldsBeforeSleep, "Number of yields too large");
+   // Each spin iteration is counted as a yield for purposes of
+   // deciding when to sleep.
+   _yield_count++;
+   // Periodically yield instead of spinning after WorkStealingSpinToYieldRatio
+   // spins.
+   if (_hard_spin_count > WorkStealingSpinToYieldRatio) {
+     os::naked_yield();
+     reset_hard_spin_information();
+   } else {
+     // Hard spin this time
+     for (uint j = 0; j < _hard_spin_limit; j++) {
+       SpinPause();
+     }
+     _hard_spin_count++;
+     // Increase the hard spinning period but only up to a limit.
+     _hard_spin_limit = MIN2(2 * _hard_spin_limit,
+                             (uint) WorkStealingHardSpins);
+   }
+ }
  
  TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
    _n_threads(n_threads),
    _queue_set(queue_set),
    _offered_termination(0),
!   _blocker(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never),
!   _spin_master(NULL) { }
  
  TaskTerminator::~TaskTerminator() {
    if (_offered_termination != 0) {
      assert(_offered_termination == _n_threads, "Must be terminated or aborted");
      assert_queue_set_empty();
    }
  
    assert(_spin_master == NULL, "Should have been reset");
  }
  
  #ifdef ASSERT
  void TaskTerminator::assert_queue_set_empty() const {
    _queue_set->assert_empty();
  }
  #endif
  
  void TaskTerminator::reset_for_reuse() {
    if (_offered_termination != 0) {
      assert(_offered_termination == _n_threads,
!            "Only %u of %u threads offered termination", _offered_termination, _n_threads);
!     assert(_spin_master == NULL, "Leftover spin master " PTR_FORMAT, p2i(_spin_master));
      _offered_termination = 0;
    }
  }
  
  void TaskTerminator::reset_for_reuse(uint n_threads) {
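
The hunk above moves the spin/yield backoff into the new DelayContext helper. As a minimal standalone sketch of the schedule that DelayContext::do_step() implements (doubling hard-spin budget, periodic yield and reset), the following compiles on its own; kHardSpins and kSpinToYieldRatio are illustrative stand-ins for the WorkStealingHardSpins and WorkStealingSpinToYieldRatio flags, not values taken from this patch.

  // Illustrative only: simulates how the per-round spin budget evolves in
  // DelayContext::do_step(). The constants are assumed stand-ins for the
  // WorkStealing* flags, not HotSpot code.
  #include <algorithm>
  #include <cstdio>

  static const unsigned kHardSpins = 4096;       // stand-in for WorkStealingHardSpins
  static const unsigned kSpinToYieldRatio = 10;  // stand-in for WorkStealingSpinToYieldRatio

  int main() {
    unsigned hard_spin_count = 0;
    unsigned hard_spin_limit = kHardSpins >> kSpinToYieldRatio;  // start with a small budget
    for (unsigned step = 0; step < 15; step++) {
      if (hard_spin_count > kSpinToYieldRatio) {
        // After kSpinToYieldRatio rounds of hard spinning, yield and reset the budget.
        std::printf("step %2u: yield, reset spin budget\n", step);
        hard_spin_count = 0;
        hard_spin_limit = kHardSpins >> kSpinToYieldRatio;
      } else {
        // Hard spin this round; the budget doubles but is capped at kHardSpins.
        std::printf("step %2u: spin %u times\n", step, hard_spin_limit);
        hard_spin_count++;
        hard_spin_limit = std::min(2 * hard_spin_limit, kHardSpins);
      }
    }
    return 0;
  }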
*** 79,230 ****
  size_t TaskTerminator::tasks_in_queue_set() const {
    return _queue_set->tasks();
  }
  
  bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
    assert(_n_threads > 0, "Initialization is incorrect");
    assert(_offered_termination < _n_threads, "Invariant");
-   assert(_blocker != NULL, "Invariant");
  
    // Single worker, done
    if (_n_threads == 1) {
      _offered_termination = 1;
      assert_queue_set_empty();
      return true;
    }
  
!   _blocker->lock_without_safepoint_check();
    _offered_termination++;
!   // All arrived, done
    if (_offered_termination == _n_threads) {
!     _blocker->notify_all();
!     _blocker->unlock();
      assert_queue_set_empty();
      return true;
    }
  
!   Thread* the_thread = Thread::current();
!   while (true) {
      if (_spin_master == NULL) {
        _spin_master = the_thread;
!       _blocker->unlock();
! 
!       if (do_spin_master_work(terminator)) {
!         assert(_offered_termination == _n_threads, "termination condition");
!         assert_queue_set_empty();
!         return true;
!       } else {
!         _blocker->lock_without_safepoint_check();
!         // There is possibility that termination is reached between dropping the lock
!         // before returning from do_spin_master_work() and acquiring lock above.
!         if (_offered_termination == _n_threads) {
!           _blocker->unlock();
!           assert_queue_set_empty();
!           return true;
!         }
        }
!     } else {
!       _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);
!       if (_offered_termination == _n_threads) {
!         _blocker->unlock();
          assert_queue_set_empty();
          return true;
!       }
!     }
! 
!     size_t tasks = tasks_in_queue_set();
!     if (exit_termination(tasks, terminator)) {
!       assert_lock_strong(_blocker);
        _offered_termination--;
-       _blocker->unlock();
        return false;
      }
    }
! }
! 
! bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
!   uint yield_count = 0;
!   // Number of hard spin loops done since last yield
!   uint hard_spin_count = 0;
!   // Number of iterations in the hard spin loop.
!   uint hard_spin_limit = WorkStealingHardSpins;
! 
!   // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
!   // If it is greater than 0, then start with a small number
!   // of spins and increase number with each turn at spinning until
!   // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
!   // Then do a yield() call and start spinning afresh.
!   if (WorkStealingSpinToYieldRatio > 0) {
!     hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
!     hard_spin_limit = MAX2(hard_spin_limit, 1U);
!   }
!   // Remember the initial spin limit.
!   uint hard_spin_start = hard_spin_limit;
! 
!   // Loop waiting for all threads to offer termination or
!   // more work.
!   while (true) {
!     // Look for more work.
!     // Periodically sleep() instead of yield() to give threads
!     // waiting on the cores the chance to grab this code
!     if (yield_count <= WorkStealingYieldsBeforeSleep) {
!       // Do a yield or hardspin.  For purposes of deciding whether
!       // to sleep, count this as a yield.
!       yield_count++;
! 
!       // Periodically call yield() instead spinning
!       // After WorkStealingSpinToYieldRatio spins, do a yield() call
!       // and reset the counts and starting limit.
!       if (hard_spin_count > WorkStealingSpinToYieldRatio) {
!         yield();
!         hard_spin_count = 0;
!         hard_spin_limit = hard_spin_start;
!       } else {
!         // Hard spin this time
!         // Increase the hard spinning period but only up to a limit.
!         hard_spin_limit = MIN2(2*hard_spin_limit,
!                                (uint) WorkStealingHardSpins);
!         for (uint j = 0; j < hard_spin_limit; j++) {
!           SpinPause();
!         }
!         hard_spin_count++;
!       }
!     } else {
!       log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
!                                   p2i(Thread::current()), yield_count);
!       yield_count = 0;
! 
!       MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
        _spin_master = NULL;
-       locker.wait(WorkStealingSleepMillis);
-       if (_spin_master == NULL) {
-         _spin_master = Thread::current();
-       } else {
-         return false;
-       }
      }
  
!     size_t tasks = tasks_in_queue_set();
!     bool exit = exit_termination(tasks, terminator);
!     {
!       MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
!       // Termination condition reached
        if (_offered_termination == _n_threads) {
!         _spin_master = NULL;
          return true;
!       } else if (exit) {
!         if (tasks >= _offered_termination - 1) {
!           locker.notify_all();
          } else {
!           for (; tasks > 1; tasks--) {
!             locker.notify();
!           }
!         }
!         _spin_master = NULL;
          return false;
        }
      }
    }
  }
--- 110,206 ----
  size_t TaskTerminator::tasks_in_queue_set() const {
    return _queue_set->tasks();
  }
  
+ void TaskTerminator::prepare_for_return(Thread* this_thread, size_t tasks) {
+   assert(_blocker.is_locked(), "must be");
+   assert(_blocker.owned_by_self(), "must be");
+   assert(_offered_termination >= 1, "must be");
+ 
+   if (_spin_master == this_thread) {
+     _spin_master = NULL;
+   }
+ 
+   if (tasks >= _offered_termination - 1) {
+     _blocker.notify_all();
+   } else {
+     for (; tasks > 1; tasks--) {
+       _blocker.notify();
+     }
+   }
+ }
+ 
  bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
    assert(_n_threads > 0, "Initialization is incorrect");
    assert(_offered_termination < _n_threads, "Invariant");
  
    // Single worker, done
    if (_n_threads == 1) {
      _offered_termination = 1;
      assert_queue_set_empty();
      return true;
    }
  
!   Thread* the_thread = Thread::current();
! 
!   MonitorLocker x(&_blocker, Mutex::_no_safepoint_check_flag);
    _offered_termination++;
! 
    if (_offered_termination == _n_threads) {
!     prepare_for_return(the_thread);
      assert_queue_set_empty();
      return true;
    }
  
!   for (;;) {
      if (_spin_master == NULL) {
        _spin_master = the_thread;
+       DelayContext delay_context;
! 
!       while (!delay_context.needs_sleep()) {
!         size_t tasks;
!         bool should_exit_termination;
!         {
!           MutexUnlocker y(&_blocker, Mutex::_no_safepoint_check_flag);
!           delay_context.do_step();
!           // Intentionally read the number of tasks outside the mutex since this
!           // is potentially a long operation making the locked section long.
!           tasks = tasks_in_queue_set();
!           should_exit_termination = exit_termination(tasks, terminator);
          }
!         // Immediately check exit conditions after re-acquiring the lock.
          if (_offered_termination == _n_threads) {
!           prepare_for_return(the_thread);
            assert_queue_set_empty();
            return true;
!         } else if (should_exit_termination) {
!           prepare_for_return(the_thread, tasks);
            _offered_termination--;
            return false;
          }
        }
!       // Give up spin master before sleeping.
        _spin_master = NULL;
      }
+     bool timed_out = x.wait(WorkStealingSleepMillis);
  
!     // Immediately check exit conditions after re-acquiring the lock.
      if (_offered_termination == _n_threads) {
!       prepare_for_return(the_thread);
!       assert_queue_set_empty();
        return true;
!     } else if (!timed_out) {
!       // We were woken up. Don't bother waking up more tasks.
!       prepare_for_return(the_thread, 0);
!       _offered_termination--;
!       return false;
      } else {
!       size_t tasks = tasks_in_queue_set();
!       if (exit_termination(tasks, terminator)) {
!         prepare_for_return(the_thread, tasks);
!         _offered_termination--;
          return false;
        }
      }
    }
  }
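
For context on how offer_termination() is typically driven, here is a hedged sketch of the usual worker-side termination loop. The drain_local_queue() and try_stealing() helpers are hypothetical stand-ins for whatever the particular GC's task code does; only offer_termination() comes from the file above.

  // Hypothetical worker loop illustrating the calling pattern; the two
  // helpers are illustrative stand-ins, not HotSpot APIs.
  void drain_local_queue();   // assumed: processes everything in this worker's queue
  bool try_stealing();        // assumed: returns true if a task was stolen from a peer

  void example_worker(TaskTerminator& terminator) {
    for (;;) {
      drain_local_queue();
      if (try_stealing()) {
        continue;  // stole something, go back to draining the local queue
      }
      // No local or stolen work left: offer to terminate. offer_termination()
      // returns true once every participating worker has offered termination,
      // and false if more work (or an abort request) appeared while waiting.
      if (terminator.offer_termination()) {
        return;
      }
    }
  }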