--- old/src/hotspot/share/gc/shared/taskTerminator.cpp 2020-08-14 10:07:53.080951850 +0200
+++ new/src/hotspot/share/gc/shared/taskTerminator.cpp 2020-08-14 10:07:52.952950930 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
  * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -25,20 +25,49 @@
 #include "precompiled.hpp"
 
-#include "gc/shared/gc_globals.hpp"
 #include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "logging/log.hpp"
-#include "runtime/mutex.hpp"
+#include "runtime/globals.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.hpp"
 
 TaskTerminator::DelayContext::DelayContext() {
   _yield_count = 0;
+  reset_hard_spin_information();
+}
+
+void TaskTerminator::DelayContext::reset_hard_spin_information() {
   _hard_spin_count = 0;
   _hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
 }
 
+bool TaskTerminator::DelayContext::needs_sleep() const {
+  return _yield_count >= WorkStealingYieldsBeforeSleep;
+}
+
+void TaskTerminator::DelayContext::do_step() {
+  assert(_yield_count < WorkStealingYieldsBeforeSleep, "Number of yields too large");
+  // Each spin iteration is counted as a yield for purposes of
+  // deciding when to sleep.
+  _yield_count++;
+  // Periodically yield instead of spinning after WorkStealingSpinToYieldRatio
+  // spins.
+  if (_hard_spin_count > WorkStealingSpinToYieldRatio) {
+    os::naked_yield();
+    reset_hard_spin_information();
+  } else {
+    // Hard spin this time
+    for (uint j = 0; j < _hard_spin_limit; j++) {
+      SpinPause();
+    }
+    _hard_spin_count++;
+    // Increase the hard spinning period but only up to a limit.
+    _hard_spin_limit = MIN2(2 * _hard_spin_limit,
+                            (uint) WorkStealingHardSpins);
+  }
+}
+
 TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
   _n_threads(n_threads),
   _queue_set(queue_set),
@@ -101,34 +130,6 @@
   }
 }
 
-bool TaskTerminator::do_delay_step(DelayContext& delay_context) {
-  assert(!_blocker.owned_by_self(), "should not be owned by self");
-
-  if (delay_context._yield_count < WorkStealingYieldsBeforeSleep) {
-    delay_context._yield_count++;
-    // Periodically call yield() instead spinning
-    // After WorkStealingSpinToYieldRatio spins, do a yield() call
-    // and reset the counts and starting limit.
-    if (delay_context._hard_spin_count > WorkStealingSpinToYieldRatio) {
-      os::naked_yield();
-      delay_context._hard_spin_count = 0;
-      delay_context._hard_spin_limit = WorkStealingHardSpins;
-    } else {
-      // Hard spin this time
-      // Increase the hard spinning period but only up to a limit.
-      delay_context._hard_spin_limit = MIN2(2 * delay_context._hard_spin_limit,
-                                            (uint) WorkStealingHardSpins);
-      for (uint j = 0; j < delay_context._hard_spin_limit; j++) {
-        SpinPause();
-      }
-      delay_context._hard_spin_count++;
-    }
-    return false;
-  } else {
-    return true;
-  }
-}
-
 bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   assert(_n_threads > 0, "Initialization is incorrect");
   assert(_offered_termination < _n_threads, "Invariant");
@@ -152,19 +153,16 @@
   }
 
   for (;;) {
-    DelayContext delay_context;
     if (_spin_master == NULL) {
       _spin_master = the_thread;
+      DelayContext delay_context;
 
-      while (delay_context._yield_count < WorkStealingYieldsBeforeSleep) {
-        // Each spin iteration is counted as a yield for purposes of
-        // deciding when to sleep.
-        ++delay_context._yield_count;
+      while (!delay_context.needs_sleep()) {
        size_t tasks;
        bool should_exit_termination;
        {
          MutexUnlocker y(&_blocker, Mutex::_no_safepoint_check_flag);
-          do_delay_step(delay_context);
+          delay_context.do_step();
          // Intentionally read the number of tasks outside the mutex since this
          // is potentially a long operation making the locked section long.
          tasks = tasks_in_queue_set();
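
For reference, the backoff policy that the new DelayContext::do_step() encodes works as follows: busy-wait in a hard-spin loop whose length doubles each step (capped at WorkStealingHardSpins), yield the CPU and reset the spin period after every WorkStealingSpinToYieldRatio hard spins, and report needs_sleep() once WorkStealingYieldsBeforeSleep steps have elapsed so the caller can stop spinning and block on _blocker. The sketch below restates that policy as a self-contained program. It is illustrative only and not part of the patch: the constants mirror what I believe are the defaults of the three WorkStealing* flags (4096, 10 and 5000), and the DelayContextSketch type and k* names are made up for this example.

// Illustrative sketch, not part of the patch: the spin/yield/sleep backoff
// of TaskTerminator::DelayContext as a standalone program. All names here
// (DelayContextSketch, k* constants) are hypothetical.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <thread>

constexpr uint32_t kHardSpins         = 4096; // assumed WorkStealingHardSpins default
constexpr uint32_t kSpinToYieldRatio  = 10;   // assumed WorkStealingSpinToYieldRatio default
constexpr uint32_t kYieldsBeforeSleep = 5000; // assumed WorkStealingYieldsBeforeSleep default

struct DelayContextSketch {
  uint32_t yield_count     = 0;
  uint32_t hard_spin_count = 0;
  // Start with a short spin period: kHardSpins >> kSpinToYieldRatio == 4.
  uint32_t hard_spin_limit = kHardSpins >> kSpinToYieldRatio;

  // After this many steps the caller should stop spinning and block.
  bool needs_sleep() const { return yield_count >= kYieldsBeforeSleep; }

  void do_step() {
    ++yield_count;
    if (hard_spin_count > kSpinToYieldRatio) {
      // Every kSpinToYieldRatio + 1 steps: yield the CPU and reset the
      // spin period back to its short initial value.
      std::this_thread::yield();
      hard_spin_count = 0;
      hard_spin_limit = kHardSpins >> kSpinToYieldRatio;
    } else {
      // Otherwise hard-spin, then double the spin period up to the cap.
      for (volatile uint32_t j = 0; j < hard_spin_limit; ++j) {
        // Busy-wait body; HotSpot issues SpinPause() here.
      }
      ++hard_spin_count;
      hard_spin_limit = std::min<uint32_t>(2 * hard_spin_limit, kHardSpins);
    }
  }
};

int main() {
  DelayContextSketch ctx;
  while (!ctx.needs_sleep()) {
    ctx.do_step();
  }
  std::printf("would sleep after %u steps\n", ctx.yield_count);
  return 0;
}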