/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_TASKQUEUE_HPP
#define SHARE_VM_GC_SHARED_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/ostream.hpp"
#include "utilities/stack.hpp"

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS
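
// Example (an illustrative sketch, not part of this header): call sites wrap
// their accounting in TASKQUEUE_STATS_ONLY so that it compiles away entirely
// when TASKQUEUE_STATS is 0.  record_push() is the TaskQueueStats method
// declared below; push_impl() is a hypothetical helper.
//
//   bool MyQueue::push(Task t) {
//     bool ok = push_impl(t);
//     TASKQUEUE_STATS_ONLY(if (ok) stats.record_push();)
//     return ok;
//   }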

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats() { reset(); }

  inline void record_push()          { ++_stats[push]; }
  inline void record_pop()           { ++_stats[pop]; }
  inline void record_pop_slow()      { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal_attempt() { ++_stats[steal_attempt]; }
  inline void record_steal()         { ++_stats[steal]; }
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS

// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age  get()        const volatile { return _data; }
    void set(Age age) volatile       { _data = age._data; }

    idx_t top() const volatile { return _fields._top; }
    idx_t tag() const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile;

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state to
    // _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
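
  // Worked example (illustrative, with N = 4, so MOD_N_MASK = 3): starting
  // from bot == 2, top == 1 the queue holds one element, and
  //   dirty_size(2, 1) == (2 - 1) & 3 == 1.
  // If a pop_local and a racing pop_global both "succeed" from that state,
  // bot is decremented to 1 while top is incremented to 2, giving
  //   dirty_size(1, 2) == (1 - 2) & 3 == 3 == N - 1,
  // which size() maps to 0: the queue is empty, exactly as the comment in
  // size() above requires.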

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // Return true if the TaskQueue contains (peek) / does not contain (is_empty)
  // any tasks.
  bool peek()     const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version, dirty_size(), admits the possibility of
  // pop_local/pop_global races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

//
// GenericTaskQueue implements an ABP, Arora-Blumofe-Plaxton, double-
// ended queue (deque), intended for use in work stealing.  Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference to the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an
// implementation for weakly ordered memory models, including (pseudo-)
// code containing memory barriers, for a Chase-Lev deque.  Chase-Lev is
// similar to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen, A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models.
// Proceedings of the 18th ACM SIGPLAN Symposium on Principles and
// Practice of Parallel Programming (PPoPP 2013), 69-80.
//

template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed) as long as the number of entries exceeds the threshold.
  // If successful, returns true and sets t to the task; otherwise returns
  // false (the queue is empty or the number of elements is below the
  // threshold).
  inline bool pop_local(volatile E& t, uint threshold = 0);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(volatile E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply fn to each element in the task queue.  The queue must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn);

private:
  // Element array.
  volatile E* _elems;
};

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}
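
// Example usage (an illustrative sketch, not part of this header; "task" and
// process() are hypothetical):
//
//   GenericTaskQueue<oop, mtGC>* q = new GenericTaskQueue<oop, mtGC>();
//   q->initialize();
//
//   // Owner thread only: push and pop at the LIFO end.
//   if (!q->push(task)) {
//     // Queue is full; a plain GenericTaskQueue leaves overflow handling
//     // to the caller (see OverflowTaskQueue below).
//   }
//   oop t;
//   if (q->pop_local(t)) {
//     process(t);   // most recently pushed task
//   }
//
//   // Any other thread: steal from the FIFO end.
//   if (q->pop_global(t)) {
//     process(t);   // least recently pushed task
//   }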

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);
  // Try to push task t onto the queue only.  Returns true if successful,
  // false otherwise.
  inline bool try_push_to_taskqueue(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};
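
// Typical drain pattern (an illustrative sketch; process() is hypothetical).
// Because push() falls back to the overflow stack instead of failing, a
// worker drains both the stack and the queue:
//
//   OverflowTaskQueue<StarTask, mtGC>* q = ...;
//   StarTask t;
//   while (q->pop_overflow(t) || q->pop_local(t)) {
//     process(t);
//   }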

class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
public:
  typedef typename T::element_type E;

private:
  uint _n;
  T** _queues;

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

public:
  GenericTaskQueueSet(int n);
  ~GenericTaskQueueSet();

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();

  uint size() const { return _n; }
};

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}
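
// Typical worker loop (an illustrative sketch; my_id, seed, and process()
// are hypothetical, and "terminator" is an instance of the
// ParallelTaskTerminator declared below):
//
//   OopTaskQueueSet* qset = ...;
//   OopTaskQueue* my_q = qset->queue(my_id);
//   oop t;
//   while (true) {
//     if (my_q->pop_local(t) || qset->steal(my_id, &seed, t)) {
//       process(t);
//     } else if (terminator.offer_termination()) {
//       break;   // all workers are idle and every queue was observed empty
//     }
//   }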

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  uint _n_threads;
  TaskQueueSetSuper* _queue_set;
  volatile uint _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is a
  // set of work queues of other threads.
  ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If returns "true", all threads are terminated.  If returns
  // "false", available work has been observed in one of the task queues,
  // so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(uint n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins()  { return _total_spins; }
  static uint total_peeks()  { return _total_peeks; }
  static void print_termination_counts();
#endif
};

typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder;        // either union oop* or narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask() { _holder = NULL; }
  operator oop*() { return (oop*)_holder; }
  operator narrowOop*() {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtGC>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtGC> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtGC>             RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtGC>  RegionTaskQueueSet;

#endif // SHARE_VM_GC_SHARED_TASKQUEUE_HPP