< prev index next >
src/hotspot/share/gc/shared/taskTerminator.cpp
Print this page
rev 60497 : [mq]: 8245721-kbarrett-review2
@@ -1,7 +1,7 @@
/*
- * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
@@ -23,24 +23,53 @@
*
*/
#include "precompiled.hpp"
-#include "gc/shared/gc_globals.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.hpp"
#include "logging/log.hpp"
-#include "runtime/mutex.hpp"
+#include "runtime/globals.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.hpp"
+// Start a fresh delay cycle: no yields performed yet, and the hard-spin
+// bookkeeping reset to its initial (shortest) spin period.
TaskTerminator::DelayContext::DelayContext() {
  _yield_count = 0;
+ reset_hard_spin_information();
+}
+
+// Reset hard-spin state: zero the count of completed spin rounds and restore
+// the initial spin period. The right shift scales WorkStealingHardSpins down
+// by 2^WorkStealingSpinToYieldRatio, so do_step() can double the period each
+// round and reach the WorkStealingHardSpins cap after that many rounds.
+void TaskTerminator::DelayContext::reset_hard_spin_information() {
_hard_spin_count = 0;
_hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
}
+// Returns true once this context has performed WorkStealingYieldsBeforeSleep
+// delay steps (each spin or yield counts as one — see do_step()), i.e. the
+// caller should stop spinning and instead sleep on the termination monitor.
+bool TaskTerminator::DelayContext::needs_sleep() const {
+ return _yield_count >= WorkStealingYieldsBeforeSleep;
+}
+
+// Perform one delay step. The common case is a bounded hard spin (a loop of
+// SpinPause() calls) whose period doubles every round, capped at
+// WorkStealingHardSpins. After more than WorkStealingSpinToYieldRatio
+// consecutive hard-spin rounds, yield the CPU once instead and reset the
+// hard-spin state so the next round starts with the short period again.
+// Callers must not invoke this once needs_sleep() is true (asserted below).
+void TaskTerminator::DelayContext::do_step() {
+ assert(_yield_count < WorkStealingYieldsBeforeSleep, "Number of yields too large");
+ // Each spin iteration is counted as a yield for purposes of
+ // deciding when to sleep.
+ _yield_count++;
+ // Periodically yield instead of spinning after WorkStealingSpinToYieldRatio
+ // spins.
+ if (_hard_spin_count > WorkStealingSpinToYieldRatio) {
+ os::naked_yield();
+ reset_hard_spin_information();
+ } else {
+ // Hard spin this time
+ for (uint j = 0; j < _hard_spin_limit; j++) {
+ SpinPause();
+ }
+ _hard_spin_count++;
+ // Increase the hard spinning period but only up to a limit.
+ _hard_spin_limit = MIN2(2 * _hard_spin_limit,
+ (uint) WorkStealingHardSpins);
+ }
+}
+
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
_n_threads(n_threads),
_queue_set(queue_set),
_offered_termination(0),
_blocker(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never),
@@ -99,38 +128,10 @@
_blocker.notify();
}
}
}
-bool TaskTerminator::do_delay_step(DelayContext& delay_context) {
- assert(!_blocker.owned_by_self(), "should not be owned by self");
-
- if (delay_context._yield_count < WorkStealingYieldsBeforeSleep) {
- delay_context._yield_count++;
- // Periodically call yield() instead spinning
- // After WorkStealingSpinToYieldRatio spins, do a yield() call
- // and reset the counts and starting limit.
- if (delay_context._hard_spin_count > WorkStealingSpinToYieldRatio) {
- os::naked_yield();
- delay_context._hard_spin_count = 0;
- delay_context._hard_spin_limit = WorkStealingHardSpins;
- } else {
- // Hard spin this time
- // Increase the hard spinning period but only up to a limit.
- delay_context._hard_spin_limit = MIN2(2 * delay_context._hard_spin_limit,
- (uint) WorkStealingHardSpins);
- for (uint j = 0; j < delay_context._hard_spin_limit; j++) {
- SpinPause();
- }
- delay_context._hard_spin_count++;
- }
- return false;
- } else {
- return true;
- }
-}
-
bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
assert(_n_threads > 0, "Initialization is incorrect");
assert(_offered_termination < _n_threads, "Invariant");
// Single worker, done
@@ -150,23 +151,20 @@
assert_queue_set_empty();
return true;
}
for (;;) {
- DelayContext delay_context;
if (_spin_master == NULL) {
_spin_master = the_thread;
+ DelayContext delay_context;
- while (delay_context._yield_count < WorkStealingYieldsBeforeSleep) {
- // Each spin iteration is counted as a yield for purposes of
- // deciding when to sleep.
- ++delay_context._yield_count;
+ while (!delay_context.needs_sleep()) {
size_t tasks;
bool should_exit_termination;
{
MutexUnlocker y(&_blocker, Mutex::_no_safepoint_check_flag);
- do_delay_step(delay_context);
+ delay_context.do_step();
// Intentionally read the number of tasks outside the mutex since this
// is potentially a long operation making the locked section long.
tasks = tasks_in_queue_set();
should_exit_termination = exit_termination(tasks, terminator);
}
< prev index next >