/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_TASKQUEUE_INLINE_HPP
#define SHARE_VM_GC_SHARED_TASKQUEUE_INLINE_HPP

#include "gc/shared/taskqueue.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/stack.inline.hpp"

template <class T, MEMFLAGS F>
inline GenericTaskQueueSet<T, F>::GenericTaskQueueSet(int n) : _n(n) {
  typedef T* GenericTaskQueuePtr;
  _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
  for (int i = 0; i < n; i++) {
    _queues[i] = NULL;
  }
}

template<class E, MEMFLAGS F, unsigned int N>
inline void GenericTaskQueue<E, F, N>::initialize() {
  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
inline GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems);
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // A dirty size of N - 1 actually means 0 (the queue is empty), so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
  uint localBot = _bottom;
  assert(localBot < N, "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}
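// Editor's note (illustrative, not original HotSpot commentary): the fast
// path above deliberately stops short of filling the array; in
// taskqueue.hpp, max_elems() is N - 2.  Because a pop_local() racing with
// a pop_global() can transiently leave (bottom, top) encoding a dirty
// size of N - 1, capping real occupancy at N - 2 lets push_slow() decode
// that value unambiguously as an empty queue rather than a full one.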
template <class E, MEMFLAGS F, unsigned int N>
inline bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

// pop_local_slow() is done by the owning thread and tries to get the last
// task in the queue.  It competes with pop_global(), which is used by
// other threads.  The tag age is incremented whenever the queue goes
// empty, which it does here if this thread gets the last task, or in
// pop_global() if the queue wraps (top == 0 and pop_global() succeeds,
// see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is empty
  // and top is greater than bottom.  Fix this representation of the empty queue
  // to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}
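// Editor's note on the tag increment above (illustrative sketch, not
// original HotSpot commentary).  Concretely, with bottom == 1 and
// top == 0 (one element, in slot 0):
//
//   1. A thief's pop_global() reads age == (top 0, tag T) and the task
//      in slot 0, then stalls before its CAS.
//   2. The owner pops that task via the slow path and pushes a new one,
//      so slot 0 holds a different task, again with bottom == 1, top == 0.
//   3. Had pop_local_slow() published (top 0, tag T) instead of
//      (top 0, tag T + 1), the thief's CAS would now succeed and it would
//      return the stale task read in step 1, which the owner already
//      claimed.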
template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top(); // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
  Age oldAge = _age.get();
  // Architectures with a weak memory model require a barrier here to
  // guarantee that the value read for bottom is not older than the value
  // read for age, which is crucial for the correctness of the algorithm.
#if !(defined SPARC || defined IA32 || defined AMD64)
  OrderAccess::fence();
#endif
  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}
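// Editor's sketch of a typical client of pop_local()/steal() (illustrative
// only; q, qset, worker_id, seed, do_task and terminator are placeholders,
// not part of this file).  A worker drains its own queue, then steals,
// then falls back to a termination protocol:
//
//   E t;
//   for (;;) {
//     while (q->pop_local(t)) {
//       do_task(t);                          // drain the local queue first
//     }
//     if (qset->steal(worker_id, &seed, t)) {
//       do_task(t);                          // stolen work may push more locally
//     } else if (terminator->offer_termination()) {
//       break;                               // e.g. a ParallelTaskTerminator
//     }
//   }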
template <unsigned int N, MEMFLAGS F>
inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile {
  return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                      (volatile intptr_t *)&_data,
                                      (intptr_t)old_age._data);
}

template<class E, MEMFLAGS F, unsigned int N>
inline void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //               index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t)));
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}
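// Editor's sketch of typical queue-set setup (illustrative; n_workers is a
// placeholder worker count).  Clients create one queue per worker and
// register it before use, along these lines:
//
//   typedef GenericTaskQueue<oop, mtGC> OopTaskQueue;
//   typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
//
//   OopTaskQueueSet* qset = new OopTaskQueueSet(n_workers);
//   for (uint i = 0; i < n_workers; i++) {
//     OopTaskQueue* q = new OopTaskQueue();
//     q->initialize();                 // allocates the backing array
//     qset->register_queue(i, q);
//   }

#endif // SHARE_VM_GC_SHARED_TASKQUEUE_INLINE_HPP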