/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
23 * 24 */ 25 26 #include "precompiled.hpp" 27 28 #include "gc/shared/taskTerminator.hpp" 29 #include "gc/shared/taskqueue.hpp" 30 #include "logging/log.hpp" 31 32 #ifdef TRACESPINNING 33 uint TaskTerminator::_total_yields = 0; 34 uint TaskTerminator::_total_spins = 0; 35 uint TaskTerminator::_total_peeks = 0; 36 #endif 37 38 TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) : 39 _n_threads(n_threads), 40 _queue_set(queue_set), 41 _offered_termination(0), 42 _spin_master(NULL) { 43 44 _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never); 45 } 46 47 TaskTerminator::~TaskTerminator() { 48 assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition"); 49 assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" ); 50 51 assert(_spin_master == NULL, "Should have been reset"); 52 assert(_blocker != NULL, "Can not be NULL"); 53 delete _blocker; 54 } 55 56 #ifdef ASSERT 57 bool TaskTerminator::peek_in_queue_set() { 58 return _queue_set->peek(); 59 } 60 #endif 61 62 void TaskTerminator::yield() { 63 assert(_offered_termination <= _n_threads, "Invariant"); 64 os::naked_yield(); 65 } 66 67 #ifdef TRACESPINNING 68 void TaskTerminator::print_termination_counts() { 69 log_trace(gc, task)("TaskTerminator Yields: %u Spins: %u Peeks: %u", 70 total_yields(), total_spins(), total_peeks()); 71 } 72 #endif 73 74 void TaskTerminator::reset_for_reuse() { 75 if (_offered_termination != 0) { 76 assert(_offered_termination == _n_threads, 77 "Terminator may still be in use"); 78 _offered_termination = 0; 79 } 80 } 81 82 void TaskTerminator::reset_for_reuse(uint n_threads) { 83 reset_for_reuse(); 84 _n_threads = n_threads; 85 } 86 87 bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) { 88 return tasks > 0 || (terminator != NULL && terminator->should_exit_termination()); 89 } 90 91 size_t TaskTerminator::tasks_in_queue_set() const { 92 return 
_queue_set->tasks(); 93 } 94 95 bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) { 96 assert(_n_threads > 0, "Initialization is incorrect"); 97 assert(_offered_termination < _n_threads, "Invariant"); 98 assert(_blocker != NULL, "Invariant"); 99 100 // Single worker, done 101 if (_n_threads == 1) { 102 _offered_termination = 1; 103 assert(!peek_in_queue_set(), "Precondition"); 104 return true; 105 } 106 107 _blocker->lock_without_safepoint_check(); 108 _offered_termination++; 109 // All arrived, done 110 if (_offered_termination == _n_threads) { 111 _blocker->notify_all(); 112 _blocker->unlock(); 113 assert(!peek_in_queue_set(), "Precondition"); 114 return true; 115 } 116 117 Thread* the_thread = Thread::current(); 118 while (true) { 119 if (_spin_master == NULL) { 120 _spin_master = the_thread; 121 122 _blocker->unlock(); 123 124 if (do_spin_master_work(terminator)) { 125 assert(_offered_termination == _n_threads, "termination condition"); 126 assert(!peek_in_queue_set(), "Precondition"); 127 return true; 128 } else { 129 _blocker->lock_without_safepoint_check(); 130 // There is possibility that termination is reached between dropping the lock 131 // before returning from do_spin_master_work() and acquiring lock above. 
132 if (_offered_termination == _n_threads) { 133 _blocker->unlock(); 134 assert(!peek_in_queue_set(), "Precondition"); 135 return true; 136 } 137 } 138 } else { 139 _blocker->wait_without_safepoint_check(WorkStealingSleepMillis); 140 141 if (_offered_termination == _n_threads) { 142 _blocker->unlock(); 143 assert(!peek_in_queue_set(), "Precondition"); 144 return true; 145 } 146 } 147 148 size_t tasks = tasks_in_queue_set(); 149 if (exit_termination(tasks, terminator)) { 150 assert_lock_strong(_blocker); 151 _offered_termination--; 152 _blocker->unlock(); 153 return false; 154 } 155 } 156 } 157 158 bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) { 159 uint yield_count = 0; 160 // Number of hard spin loops done since last yield 161 uint hard_spin_count = 0; 162 // Number of iterations in the hard spin loop. 163 uint hard_spin_limit = WorkStealingHardSpins; 164 165 // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done. 166 // If it is greater than 0, then start with a small number 167 // of spins and increase number with each turn at spinning until 168 // the count of hard spins exceeds WorkStealingSpinToYieldRatio. 169 // Then do a yield() call and start spinning afresh. 170 if (WorkStealingSpinToYieldRatio > 0) { 171 hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio; 172 hard_spin_limit = MAX2(hard_spin_limit, 1U); 173 } 174 // Remember the initial spin limit. 175 uint hard_spin_start = hard_spin_limit; 176 177 // Loop waiting for all threads to offer termination or 178 // more work. 179 while (true) { 180 // Look for more work. 181 // Periodically sleep() instead of yield() to give threads 182 // waiting on the cores the chance to grab this code 183 if (yield_count <= WorkStealingYieldsBeforeSleep) { 184 // Do a yield or hardspin. For purposes of deciding whether 185 // to sleep, count this as a yield. 
186 yield_count++; 187 188 // Periodically call yield() instead spinning 189 // After WorkStealingSpinToYieldRatio spins, do a yield() call 190 // and reset the counts and starting limit. 191 if (hard_spin_count > WorkStealingSpinToYieldRatio) { 192 yield(); 193 hard_spin_count = 0; 194 hard_spin_limit = hard_spin_start; 195 #ifdef TRACESPINNING 196 _total_yields++; 197 #endif 198 } else { 199 // Hard spin this time 200 // Increase the hard spinning period but only up to a limit. 201 hard_spin_limit = MIN2(2*hard_spin_limit, 202 (uint) WorkStealingHardSpins); 203 for (uint j = 0; j < hard_spin_limit; j++) { 204 SpinPause(); 205 } 206 hard_spin_count++; 207 #ifdef TRACESPINNING 208 _total_spins++; 209 #endif 210 } 211 } else { 212 log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields", 213 p2i(Thread::current()), yield_count); 214 yield_count = 0; 215 216 MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag); 217 _spin_master = NULL; 218 locker.wait(WorkStealingSleepMillis); 219 if (_spin_master == NULL) { 220 _spin_master = Thread::current(); 221 } else { 222 return false; 223 } 224 } 225 226 #ifdef TRACESPINNING 227 _total_peeks++; 228 #endif 229 size_t tasks = tasks_in_queue_set(); 230 bool exit = exit_termination(tasks, terminator); 231 { 232 MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag); 233 // Termination condition reached 234 if (_offered_termination == _n_threads) { 235 _spin_master = NULL; 236 return true; 237 } else if (exit) { 238 if (tasks >= _offered_termination - 1) { 239 locker.notify_all(); 240 } else { 241 for (; tasks > 1; tasks--) { 242 locker.notify(); 243 } 244 } 245 _spin_master = NULL; 246 return false; 247 } 248 } 249 } 250 }