/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.hpp"
#include "logging/log.hpp"

// TaskTerminator implements cooperative termination for a gang of GC worker
// threads stealing from a shared TaskQueueSet. A worker that runs out of
// local work calls offer_termination(); termination is reached once all
// _n_threads workers have offered. One offering thread at a time acts as
// "spin master" and actively polls the queue set for new work (spinning,
// yielding, then sleeping) while the remaining offerers sleep on _blocker.
//
// n_threads: number of workers that must offer before termination succeeds.
// queue_set: the task queues whose emptiness (plus all workers offering)
//            defines termination.
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0),
  _spin_master(NULL) {

  // Leaf-ranked monitor; termination happens inside a GC phase, so waits
  // never perform a safepoint check.
  _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
}

TaskTerminator::~TaskTerminator() {
  // A non-zero offer count means a termination round ran to completion
  // (or was aborted with all threads accounted for); a partially completed
  // round would indicate the terminator is still in use.
  if (_offered_termination != 0) {
    assert(_offered_termination == _n_threads, "Must be terminated or aborted");
    assert_queue_set_empty();
  }

  assert(_spin_master == NULL, "Should have been reset");
  assert(_blocker != NULL, "Can not be NULL");
  delete _blocker;
}

#ifdef ASSERT
// Debug-only: verify that every queue in the associated set is empty.
void TaskTerminator::assert_queue_set_empty() const {
  _queue_set->assert_empty();
}
#endif

// Give up the CPU without sleeping; used by the spin master between
// batches of hard spins.
void TaskTerminator::yield() {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::naked_yield();
}

// Prepare the terminator for another termination round. Must only be
// called when no thread is inside offer_termination().
void TaskTerminator::reset_for_reuse() {
  if (_offered_termination != 0) {
    assert(_offered_termination == _n_threads,
           "Terminator may still be in use");
    _offered_termination = 0;
  }
}

// As reset_for_reuse(), but also changes the number of participating
// threads for the next round.
void TaskTerminator::reset_for_reuse(uint n_threads) {
  reset_for_reuse();
  _n_threads = n_threads;
}

// A thread should leave the termination protocol when stolen work has
// appeared, or when the optional external terminator requests it.
bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
  return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
}

// Approximate count of tasks currently available across the queue set.
size_t TaskTerminator::tasks_in_queue_set() const {
  return _queue_set->tasks();
}

// Called by a worker that has exhausted its local work.
// Returns true when all _n_threads workers have offered termination (the
// queue set is then empty); returns false when the caller should resume
// work because tasks appeared or the external terminator fired.
// NOTE(review): _offered_termination is read in several places without
// holding _blocker; presumably it is declared volatile/atomic in
// taskTerminator.hpp — not visible in this file, confirm against header.
bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  assert(_blocker != NULL, "Invariant");

  // Single worker, done
  if (_n_threads == 1) {
    _offered_termination = 1;
    assert_queue_set_empty();
    return true;
  }

  _blocker->lock_without_safepoint_check();
  _offered_termination++;
  // All arrived, done
  if (_offered_termination == _n_threads) {
    _blocker->notify_all();
    _blocker->unlock();
    assert_queue_set_empty();
    return true;
  }

  Thread* the_thread = Thread::current();
  while (true) {
    if (_spin_master == NULL) {
      // No spin master: claim the role and poll for work/termination
      // outside the lock.
      _spin_master = the_thread;

      _blocker->unlock();

      if (do_spin_master_work(terminator)) {
        assert(_offered_termination == _n_threads, "termination condition");
        assert_queue_set_empty();
        return true;
      } else {
        _blocker->lock_without_safepoint_check();
        // There is a possibility that termination is reached between dropping the lock
        // before returning from do_spin_master_work() and acquiring lock above.
        if (_offered_termination == _n_threads) {
          _blocker->unlock();
          assert_queue_set_empty();
          return true;
        }
      }
    } else {
      // Someone else is spin master: sleep on the monitor until notified,
      // timed out, or woken spuriously. The wait releases _blocker and
      // re-acquires it before returning.
      _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);

      if (_offered_termination == _n_threads) {
        _blocker->unlock();
        assert_queue_set_empty();
        return true;
      }
    }

    // Still holding _blocker here (either re-acquired after
    // do_spin_master_work() returned false, or held across the wait).
    size_t tasks = tasks_in_queue_set();
    if (exit_termination(tasks, terminator)) {
      assert_lock_strong(_blocker);
      // Retract this thread's offer before going back to work.
      _offered_termination--;
      _blocker->unlock();
      return false;
    }
  }
}

// Spin-master duty loop: alternate hard spinning, yielding and finally
// sleeping, while watching the queue set for new work and checking for
// termination. Returns true if termination was reached while this thread
// was spin master; returns false when the caller should re-evaluate —
// either work appeared (or the external terminator fired), or another
// thread took over the spin-master role while this one slept.
bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
  uint yield_count = 0;
  // Number of hard spin loops done since last yield
  uint hard_spin_count = 0;
  // Number of iterations in the hard spin loop.
  uint hard_spin_limit = WorkStealingHardSpins;

  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
  // If it is greater than 0, then start with a small number
  // of spins and increase number with each turn at spinning until
  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
  // Then do a yield() call and start spinning afresh.
  if (WorkStealingSpinToYieldRatio > 0) {
    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
    hard_spin_limit = MAX2(hard_spin_limit, 1U);
  }
  // Remember the initial spin limit.
  uint hard_spin_start = hard_spin_limit;

  // Loop waiting for all threads to offer termination or
  // more work.
  while (true) {
    // Look for more work.
    // Periodically sleep() instead of yield() to give threads
    // waiting on the cores the chance to grab this code
    if (yield_count <= WorkStealingYieldsBeforeSleep) {
      // Do a yield or hardspin. For purposes of deciding whether
      // to sleep, count this as a yield.
      yield_count++;

      // Periodically call yield() instead of spinning.
      // After WorkStealingSpinToYieldRatio spins, do a yield() call
      // and reset the counts and starting limit.
      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
        yield();
        hard_spin_count = 0;
        hard_spin_limit = hard_spin_start;
      } else {
        // Hard spin this time
        // Increase the hard spinning period but only up to a limit.
        hard_spin_limit = MIN2(2*hard_spin_limit,
                               (uint) WorkStealingHardSpins);
        for (uint j = 0; j < hard_spin_limit; j++) {
          SpinPause();
        }
        hard_spin_count++;
      }
    } else {
      // Spinning budget exhausted: give up the spin-master role and sleep.
      log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
                                  p2i(Thread::current()), yield_count);
      yield_count = 0;

      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      _spin_master = NULL;
      locker.wait(WorkStealingSleepMillis);
      if (_spin_master == NULL) {
        // Nobody claimed the role while we slept; take it back.
        _spin_master = Thread::current();
      } else {
        // Another thread became spin master; let the caller re-enter the
        // offer loop as an ordinary sleeper.
        return false;
      }
    }

    size_t tasks = tasks_in_queue_set();
    bool exit = exit_termination(tasks, terminator);
    {
      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      // Termination condition reached
      if (_offered_termination == _n_threads) {
        _spin_master = NULL;
        return true;
      } else if (exit) {
        // Work appeared: wake sleepers. If there are at least as many
        // tasks as sleeping offerers, wake them all; otherwise wake one
        // sleeper per available task beyond the one this (exiting) spin
        // master will itself go after.
        if (tasks >= _offered_termination - 1) {
          locker.notify_all();
        } else {
          for (; tasks > 1; tasks--) {
            locker.notify();
          }
        }
        _spin_master = NULL;
        return false;
      }
    }
  }
}