1 /*
2 * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
3 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/gc_globals.hpp"
29 #include "gc/shared/taskTerminator.hpp"
30 #include "gc/shared/taskqueue.hpp"
31 #include "logging/log.hpp"
32 #include "runtime/mutex.hpp"
33 #include "runtime/mutexLocker.hpp"
34 #include "runtime/thread.hpp"
35
36 TaskTerminator::DelayContext::DelayContext() {
37 _yield_count = 0;
38 _hard_spin_count = 0;
39 _hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
40 }
41
// Construct a terminator coordinating n_threads workers over queue_set.
// _offered_termination counts workers currently offering termination;
// _spin_master is the single thread allowed to spin/yield while the
// others sleep on _blocker (no safepoint checks on this monitor).
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0),
  _blocker(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never),
  _spin_master(NULL) { }
48
// On destruction the terminator must either be unused
// (_offered_termination == 0) or fully terminated/aborted with every
// worker having offered termination and no tasks left in the queue set.
TaskTerminator::~TaskTerminator() {
  if (_offered_termination != 0) {
    assert(_offered_termination == _n_threads, "Must be terminated or aborted");
    assert_queue_set_empty();
  }

  assert(_spin_master == NULL, "Should have been reset");
}
57
58 #ifdef ASSERT
// Debug-only check that every task queue in the set is empty.
void TaskTerminator::assert_queue_set_empty() const {
  _queue_set->assert_empty();
}
84 }
85
86 void TaskTerminator::prepare_for_return(Thread* this_thread, size_t tasks) {
87 assert(_blocker.is_locked(), "must be");
88 assert(_blocker.owned_by_self(), "must be");
89 assert(_offered_termination >= 1, "must be");
90
91 if (_spin_master == this_thread) {
92 _spin_master = NULL;
93 }
94
95 if (tasks >= _offered_termination - 1) {
96 _blocker.notify_all();
97 } else {
98 for (; tasks > 1; tasks--) {
99 _blocker.notify();
100 }
101 }
102 }
103
104 bool TaskTerminator::do_delay_step(DelayContext& delay_context) {
105 assert(!_blocker.owned_by_self(), "should not be owned by self");
106
107 if (delay_context._yield_count < WorkStealingYieldsBeforeSleep) {
108 delay_context._yield_count++;
109 // Periodically call yield() instead spinning
110 // After WorkStealingSpinToYieldRatio spins, do a yield() call
111 // and reset the counts and starting limit.
112 if (delay_context._hard_spin_count > WorkStealingSpinToYieldRatio) {
113 os::naked_yield();
114 delay_context._hard_spin_count = 0;
115 delay_context._hard_spin_limit = WorkStealingHardSpins;
116 } else {
117 // Hard spin this time
118 // Increase the hard spinning period but only up to a limit.
119 delay_context._hard_spin_limit = MIN2(2 * delay_context._hard_spin_limit,
120 (uint) WorkStealingHardSpins);
121 for (uint j = 0; j < delay_context._hard_spin_limit; j++) {
122 SpinPause();
123 }
124 delay_context._hard_spin_count++;
125 }
126 return false;
127 } else {
128 return true;
129 }
130 }
131
132 bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
133 assert(_n_threads > 0, "Initialization is incorrect");
134 assert(_offered_termination < _n_threads, "Invariant");
135
136 // Single worker, done
137 if (_n_threads == 1) {
138 _offered_termination = 1;
139 assert_queue_set_empty();
140 return true;
141 }
142
143 Thread* the_thread = Thread::current();
144
145 MonitorLocker x(&_blocker, Mutex::_no_safepoint_check_flag);
146 _offered_termination++;
147
148 if (_offered_termination == _n_threads) {
149 prepare_for_return(the_thread);
150 assert_queue_set_empty();
151 return true;
152 }
153
154 for (;;) {
155 DelayContext delay_context;
156 if (_spin_master == NULL) {
157 _spin_master = the_thread;
158
159 while (delay_context._yield_count < WorkStealingYieldsBeforeSleep) {
160 // Each spin iteration is counted as a yield for purposes of
161 // deciding when to sleep.
162 ++delay_context._yield_count;
163 size_t tasks;
164 bool should_exit_termination;
165 {
166 MutexUnlocker y(&_blocker, Mutex::_no_safepoint_check_flag);
167 do_delay_step(delay_context);
168 // Intentionally read the number of tasks outside the mutex since this
169 // is potentially a long operation making the locked section long.
170 tasks = tasks_in_queue_set();
171 should_exit_termination = exit_termination(tasks, terminator);
172 }
173 // Immediately check exit conditions after re-acquiring the lock.
174 if (_offered_termination == _n_threads) {
175 prepare_for_return(the_thread);
176 assert_queue_set_empty();
177 return true;
178 } else if (should_exit_termination) {
179 prepare_for_return(the_thread, tasks);
180 _offered_termination--;
181 return false;
182 }
183 }
184 // Give up spin master before sleeping.
185 _spin_master = NULL;
186 }
187 bool timed_out = x.wait(WorkStealingSleepMillis);
|
1 /*
2 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
3 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/taskTerminator.hpp"
29 #include "gc/shared/taskqueue.hpp"
30 #include "logging/log.hpp"
31 #include "runtime/globals.hpp"
32 #include "runtime/mutexLocker.hpp"
33 #include "runtime/thread.hpp"
34
35 TaskTerminator::DelayContext::DelayContext() {
36 _yield_count = 0;
37 reset_hard_spin_information();
38 }
39
// Reset hard-spin state: the next back-off cycle starts from the
// smallest spin limit (WorkStealingHardSpins scaled down by
// WorkStealingSpinToYieldRatio); do_step() doubles it from there.
void TaskTerminator::DelayContext::reset_hard_spin_information() {
  _hard_spin_count = 0;
  _hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
}
44
// Returns true once the yield budget is exhausted and the caller should
// sleep on the blocker monitor instead of doing further delay steps.
bool TaskTerminator::DelayContext::needs_sleep() const {
  return _yield_count >= WorkStealingYieldsBeforeSleep;
}
48
49 void TaskTerminator::DelayContext::do_step() {
50 assert(_yield_count < WorkStealingYieldsBeforeSleep, "Number of yields too large");
51 // Each spin iteration is counted as a yield for purposes of
52 // deciding when to sleep.
53 _yield_count++;
54 // Periodically yield instead of spinning after WorkStealingSpinToYieldRatio
55 // spins.
56 if (_hard_spin_count > WorkStealingSpinToYieldRatio) {
57 os::naked_yield();
58 reset_hard_spin_information();
59 } else {
60 // Hard spin this time
61 for (uint j = 0; j < _hard_spin_limit; j++) {
62 SpinPause();
63 }
64 _hard_spin_count++;
65 // Increase the hard spinning period but only up to a limit.
66 _hard_spin_limit = MIN2(2 * _hard_spin_limit,
67 (uint) WorkStealingHardSpins);
68 }
69 }
70
// Construct a terminator coordinating n_threads workers over queue_set.
// _offered_termination counts workers currently offering termination;
// _spin_master is the single thread allowed to spin/yield while the
// others sleep on _blocker (no safepoint checks on this monitor).
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0),
  _blocker(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never),
  _spin_master(NULL) { }
77
// On destruction the terminator must either be unused
// (_offered_termination == 0) or fully terminated/aborted with every
// worker having offered termination and no tasks left in the queue set.
TaskTerminator::~TaskTerminator() {
  if (_offered_termination != 0) {
    assert(_offered_termination == _n_threads, "Must be terminated or aborted");
    assert_queue_set_empty();
  }

  assert(_spin_master == NULL, "Should have been reset");
}
86
87 #ifdef ASSERT
// Debug-only check that every task queue in the set is empty.
void TaskTerminator::assert_queue_set_empty() const {
  _queue_set->assert_empty();
}
113 }
114
115 void TaskTerminator::prepare_for_return(Thread* this_thread, size_t tasks) {
116 assert(_blocker.is_locked(), "must be");
117 assert(_blocker.owned_by_self(), "must be");
118 assert(_offered_termination >= 1, "must be");
119
120 if (_spin_master == this_thread) {
121 _spin_master = NULL;
122 }
123
124 if (tasks >= _offered_termination - 1) {
125 _blocker.notify_all();
126 } else {
127 for (; tasks > 1; tasks--) {
128 _blocker.notify();
129 }
130 }
131 }
132
133 bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
134 assert(_n_threads > 0, "Initialization is incorrect");
135 assert(_offered_termination < _n_threads, "Invariant");
136
137 // Single worker, done
138 if (_n_threads == 1) {
139 _offered_termination = 1;
140 assert_queue_set_empty();
141 return true;
142 }
143
144 Thread* the_thread = Thread::current();
145
146 MonitorLocker x(&_blocker, Mutex::_no_safepoint_check_flag);
147 _offered_termination++;
148
149 if (_offered_termination == _n_threads) {
150 prepare_for_return(the_thread);
151 assert_queue_set_empty();
152 return true;
153 }
154
155 for (;;) {
156 if (_spin_master == NULL) {
157 _spin_master = the_thread;
158 DelayContext delay_context;
159
160 while (!delay_context.needs_sleep()) {
161 size_t tasks;
162 bool should_exit_termination;
163 {
164 MutexUnlocker y(&_blocker, Mutex::_no_safepoint_check_flag);
165 delay_context.do_step();
166 // Intentionally read the number of tasks outside the mutex since this
167 // is potentially a long operation making the locked section long.
168 tasks = tasks_in_queue_set();
169 should_exit_termination = exit_termination(tasks, terminator);
170 }
171 // Immediately check exit conditions after re-acquiring the lock.
172 if (_offered_termination == _n_threads) {
173 prepare_for_return(the_thread);
174 assert_queue_set_empty();
175 return true;
176 } else if (should_exit_termination) {
177 prepare_for_return(the_thread, tasks);
178 _offered_termination--;
179 return false;
180 }
181 }
182 // Give up spin master before sleeping.
183 _spin_master = NULL;
184 }
185 bool timed_out = x.wait(WorkStealingSleepMillis);
|