12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/taskTerminator.hpp"
29 #include "gc/shared/taskqueue.hpp"
30 #include "logging/log.hpp"
31
#ifdef TRACESPINNING
// Cumulative diagnostic counters, shared across all TaskTerminator
// instances (TRACESPINNING builds only); reported by
// print_termination_counts().
uint TaskTerminator::_total_yields = 0;
uint TaskTerminator::_total_spins = 0;
uint TaskTerminator::_total_peeks = 0;
#endif
37
// Construct a terminator coordinating "n_threads" workers that drain
// "queue_set". The blocking monitor is heap-allocated here and freed in
// the destructor; it never performs safepoint checks.
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0),
  _spin_master(NULL) {

  _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
}
46
TaskTerminator::~TaskTerminator() {
  // Either termination was never started, or the queue set must be drained.
  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
  // Once started, termination must have completed: every thread offered.
  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" );

  // The last spin master must have relinquished the role before teardown.
  assert(_spin_master == NULL, "Should have been reset");
  assert(_blocker != NULL, "Can not be NULL");
  delete _blocker;
}
55
#ifdef ASSERT
// Debug-only helper: returns true if any queue in the set still holds a
// task. Used by the destructor's precondition checks.
bool TaskTerminator::peek_in_queue_set() {
  return _queue_set->peek();
}
#endif
61
// Yield the CPU without blocking; called from the spin-wait loop.
void TaskTerminator::yield() {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::naked_yield();
}
66
#ifdef TRACESPINNING
// Log the accumulated yield/spin/peek statistics (TRACESPINNING builds only).
void TaskTerminator::print_termination_counts() {
  log_trace(gc, task)("TaskTerminator Yields: %u Spins: %u Peeks: %u",
                      total_yields(), total_spins(), total_peeks());
}
#endif
73
74 void TaskTerminator::reset_for_reuse() {
75 if (_offered_termination != 0) {
76 assert(_offered_termination == _n_threads,
77 "Terminator may still be in use");
78 _offered_termination = 0;
79 }
80 }
81
// Reset for reuse, additionally changing the number of participating
// threads to "n_threads".
void TaskTerminator::reset_for_reuse(uint n_threads) {
  reset_for_reuse();
  _n_threads = n_threads;
}
86
87 bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
88 return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
89 }
90
// Total number of tasks currently queued across the whole queue set.
size_t TaskTerminator::tasks_in_queue_set() const {
  return _queue_set->tasks();
}
  // NOTE(review): this is the tail of TaskTerminator::do_spin_master_work();
  // the function head (declarations of yield_count, hard_spin_count,
  // hard_spin_limit) lies outside this chunk — confirm against full file.
  uint hard_spin_start = hard_spin_limit;

  // Loop waiting for all threads to offer termination or
  // more work.
  while (true) {
    // Look for more work.
    // Periodically sleep() instead of yield() to give threads
    // waiting on the cores the chance to grab this code
    if (yield_count <= WorkStealingYieldsBeforeSleep) {
      // Do a yield or hardspin. For purposes of deciding whether
      // to sleep, count this as a yield.
      yield_count++;

      // Periodically call yield() instead spinning
      // After WorkStealingSpinToYieldRatio spins, do a yield() call
      // and reset the counts and starting limit.
      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
        yield();
        hard_spin_count = 0;
        hard_spin_limit = hard_spin_start;
#ifdef TRACESPINNING
        _total_yields++;
#endif
      } else {
        // Hard spin this time
        // Increase the hard spinning period but only up to a limit.
        hard_spin_limit = MIN2(2*hard_spin_limit,
                               (uint) WorkStealingHardSpins);
        for (uint j = 0; j < hard_spin_limit; j++) {
          SpinPause();
        }
        hard_spin_count++;
#ifdef TRACESPINNING
        _total_spins++;
#endif
      }
    } else {
      log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
                                  p2i(Thread::current()), yield_count);
      yield_count = 0;

      // Sleep as the spin master: give up the role, wait on the monitor,
      // then try to reclaim it on wakeup.
      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      _spin_master = NULL;
      locker.wait(WorkStealingSleepMillis);
      if (_spin_master == NULL) {
        // No other thread took over while we slept: resume spin mastership.
        _spin_master = Thread::current();
      } else {
        // Another thread is spin master now; stop spinning here.
        return false;
      }
    }

#ifdef TRACESPINNING
    _total_peeks++;
#endif
    // Re-check for available work and the external exit condition.
    size_t tasks = tasks_in_queue_set();
    bool exit = exit_termination(tasks, terminator);
    {
      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      // Termination condition reached
      if (_offered_termination == _n_threads) {
        _spin_master = NULL;
        return true;
      } else if (exit) {
        // Work appeared (or the external terminator fired): wake blocked
        // workers. If there are tasks for (nearly) all of them, wake all;
        // otherwise issue tasks-1 single notifies.
        if (tasks >= _offered_termination - 1) {
          locker.notify_all();
        } else {
          for (; tasks > 1; tasks--) {
            locker.notify();
          }
        }
        _spin_master = NULL;
        return false;
      }
    }
|
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/taskTerminator.hpp"
29 #include "gc/shared/taskqueue.hpp"
30 #include "logging/log.hpp"
31
// Construct a terminator coordinating "n_threads" workers that drain
// "queue_set". The blocking monitor is heap-allocated here and freed in
// the destructor; it never performs safepoint checks.
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0),
  _spin_master(NULL) {

  _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
}
40
TaskTerminator::~TaskTerminator() {
  // Either termination was never started, or the queue set must be drained.
  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
  // Once started, termination must have completed: every thread offered.
  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" );

  // The last spin master must have relinquished the role before teardown.
  assert(_spin_master == NULL, "Should have been reset");
  assert(_blocker != NULL, "Can not be NULL");
  delete _blocker;
}
49
#ifdef ASSERT
// Debug-only helper: returns true if any queue in the set still holds a
// task. Used by the destructor's precondition checks.
bool TaskTerminator::peek_in_queue_set() {
  return _queue_set->peek();
}
#endif
55
// Yield the CPU without blocking; called from the spin-wait loop.
void TaskTerminator::yield() {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::naked_yield();
}
60
61 void TaskTerminator::reset_for_reuse() {
62 if (_offered_termination != 0) {
63 assert(_offered_termination == _n_threads,
64 "Terminator may still be in use");
65 _offered_termination = 0;
66 }
67 }
68
// Reset for reuse, additionally changing the number of participating
// threads to "n_threads".
void TaskTerminator::reset_for_reuse(uint n_threads) {
  reset_for_reuse();
  _n_threads = n_threads;
}
73
74 bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
75 return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
76 }
77
// Total number of tasks currently queued across the whole queue set.
size_t TaskTerminator::tasks_in_queue_set() const {
  return _queue_set->tasks();
}
  // NOTE(review): this is the tail of TaskTerminator::do_spin_master_work();
  // the function head (declarations of yield_count, hard_spin_count,
  // hard_spin_limit) lies outside this chunk — confirm against full file.
  uint hard_spin_start = hard_spin_limit;

  // Loop waiting for all threads to offer termination or
  // more work.
  while (true) {
    // Look for more work.
    // Periodically sleep() instead of yield() to give threads
    // waiting on the cores the chance to grab this code
    if (yield_count <= WorkStealingYieldsBeforeSleep) {
      // Do a yield or hardspin. For purposes of deciding whether
      // to sleep, count this as a yield.
      yield_count++;

      // Periodically call yield() instead spinning
      // After WorkStealingSpinToYieldRatio spins, do a yield() call
      // and reset the counts and starting limit.
      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
        yield();
        hard_spin_count = 0;
        hard_spin_limit = hard_spin_start;
      } else {
        // Hard spin this time
        // Increase the hard spinning period but only up to a limit.
        hard_spin_limit = MIN2(2*hard_spin_limit,
                               (uint) WorkStealingHardSpins);
        for (uint j = 0; j < hard_spin_limit; j++) {
          SpinPause();
        }
        hard_spin_count++;
      }
    } else {
      log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
                                  p2i(Thread::current()), yield_count);
      yield_count = 0;

      // Sleep as the spin master: give up the role, wait on the monitor,
      // then try to reclaim it on wakeup.
      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      _spin_master = NULL;
      locker.wait(WorkStealingSleepMillis);
      if (_spin_master == NULL) {
        // No other thread took over while we slept: resume spin mastership.
        _spin_master = Thread::current();
      } else {
        // Another thread is spin master now; stop spinning here.
        return false;
      }
    }

    // Re-check for available work and the external exit condition.
    size_t tasks = tasks_in_queue_set();
    bool exit = exit_termination(tasks, terminator);
    {
      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      // Termination condition reached
      if (_offered_termination == _n_threads) {
        _spin_master = NULL;
        return true;
      } else if (exit) {
        // Work appeared (or the external terminator fired): wake blocked
        // workers. If there are tasks for (nearly) all of them, wake all;
        // otherwise issue tasks-1 single notifies.
        if (tasks >= _offered_termination - 1) {
          locker.notify_all();
        } else {
          for (; tasks > 1; tasks--) {
            locker.notify();
          }
        }
        _spin_master = NULL;
        return false;
      }
    }
|