/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/stack.hpp"

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats() { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS
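
// Usage sketch (illustrative only; assumes a build where TASKQUEUE_STATS
// is 1).  The queue classes below embed a "stats" member and update it on
// their fast and slow paths; per-worker counters can then be summed with
// operator+= and printed.  "n_workers" and "queue(i)" are hypothetical
// stand-ins for a real worker/queue-set arrangement:
//
//   TaskQueueStats total;
//   for (uint i = 0; i < n_workers; ++i) {
//     total += queue(i)->stats;
//   }
//   TaskQueueStats::print_header(1); tty->cr();
//   TaskQueueStats::print_header(2); tty->cr();
//   total.print(tty); tty->cr();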

// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age  get()        const volatile { return _data; }
    void set(Age age) volatile       { _data = age._data; }

    idx_t top() const volatile { return _fields._top; }
    idx_t tag() const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state to
    // _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // Return true if the TaskQueue contains any tasks.
  bool peek() const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "dirty" version (dirty_size()) admits the possibility of
  // pop_local/pop_global races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static const uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};
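
// Worked example of the mod-N arithmetic (illustrative only): with
// N == 8, bot == 2 and top == 6 the queue has wrapped; dirty_size
// computes (2 - 6) & 7 == 4, the four elements at indices 6, 7, 0, 1.
// The special value N-1 == 7 can arise only from the pop_local/
// pop_global race described above, which is why size() reports it as 0.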

//
// GenericTaskQueue implements an ABP, Aurora-Blumofe-Plaxton, double-
// ended-queue (deque), intended for use in work stealing. Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference from the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an
// implementation for weakly ordered memory models including (pseudo-)
// code containing memory barriers for a Chase-Lev deque. Chase-Lev is
// similar to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen, A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models
// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
// practice of parallel programming (PPoPP 2013), 69-80
//

template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
  ArrayAllocator<E, F> _array_allocator;
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(volatile E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(volatile E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};
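
// Usage sketch (illustrative only): the owner thread pushes and pops at
// the "local" end, so the queue behaves as a LIFO stack for it, while any
// other worker may take from the "global" end:
//
//   GenericTaskQueue<oop, mtGC>* q = new GenericTaskQueue<oop, mtGC>();
//   q->initialize();
//   if (!q->push(task)) {
//     ... // queue full; the caller must handle the overflow itself
//   }
//   oop t;
//   while (q->pop_local(t)) {
//     ... // owner processes t
//   }
//   // A stealing thread would instead call q->pop_global(t).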

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::initialize() {
  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //            index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is empty
  // and top is greater than bottom.  Fix this representation of the empty queue
  // to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
  Age oldAge = _age.get();
  // Architectures with weak memory model require a barrier here
  // to guarantee that bottom is not older than age,
  // which is crucial for the correctness of the algorithm.
#if !(defined SPARC || defined IA32 || defined AMD64)
  OrderAccess::fence();
#endif
  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems, F);
}
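
// Worked example of the tag's role (illustrative only): suppose
// _bottom == 1 and top == 0, i.e. one element at index 0.  A thief reads
// Age{top == 0, tag == k} and stalls before its CAS.  The owner pops the
// element via pop_local_slow(), installing Age{0, k+1}, and then pushes
// a new task at index 0.  When the stalled thief finally attempts its
// CAS, expecting Age{0, k}, it fails because of the tag bump; without
// the tag it would have "claimed" the stale element.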

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);
  // Try to push task t onto the queue only. Returns true if successful, false otherwise.
  inline bool try_push_to_taskqueue(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::try_push_to_taskqueue(E t) {
  return taskqueue_t::push(t);
}
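
// Usage sketch (illustrative only): because push() spills to the overflow
// stack instead of failing, a worker typically drains the overflow stack
// before (or alongside) its task queue:
//
//   OverflowTaskQueue<oop, mtGC>* q = new OverflowTaskQueue<oop, mtGC>();
//   q->initialize();
//   q->push(task);     // always succeeds; may go to the overflow stack
//   oop t;
//   while (q->pop_overflow(t) || q->pop_local(t)) {
//     ... // process t
//   }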

class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
  virtual size_t tasks() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
  size_t tasks();

  uint size() const { return _n; }
};

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

template<class T, MEMFLAGS F>
size_t GenericTaskQueueSet<T, F>::tasks() {
  size_t n = 0;
  for (uint j = 0; j < _n; j++) {
    n += _queues[j]->size();
  }
  return n;
}
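
// Usage sketch (illustrative only): each of n_workers threads owns one
// queue in the set and falls back to stealing when its own queue runs
// dry.  "id" and "seed" stand for the worker's queue number and its
// per-thread random seed:
//
//   GenericTaskQueueSet<OopTaskQueue, mtGC>* qset =
//     new GenericTaskQueueSet<OopTaskQueue, mtGC>(n_workers);
//   for (uint i = 0; i < n_workers; ++i) {
//     qset->register_queue(i, new OopTaskQueue());
//     qset->queue(i)->initialize();
//   }
//   // In worker "id":
//   oop t;
//   if (!qset->queue(id)->pop_local(t) && !qset->steal(id, &seed, t)) {
//     ... // no local work and nothing stolen; consider termination
//   }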

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
  virtual bool should_force_termination() { return false; }
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
protected:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is a
  // set of work queues of other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If returns "true", all threads are terminated.  If returns
  // "false", available work has been observed in one of the task queues,
  // so the global task is not complete.
  virtual bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins()  { return _total_spins; }
  static uint total_peeks()  { return _total_peeks; }
  static void print_termination_counts();
#endif
};
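
// Usage sketch (illustrative only): a typical worker loop interleaves
// draining its own queue, stealing, and the termination protocol.
// drain_local_queue() and process() are hypothetical helpers:
//
//   ParallelTaskTerminator terminator(n_workers, qset);
//   while (true) {
//     drain_local_queue();
//     oop t;
//     if (qset->steal(id, &seed, t)) {
//       process(t);
//       continue;
//     }
//     if (terminator.offer_termination()) {
//       break;   // every worker has offered termination; all work is done
//     }
//   }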

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
  uint localBot = _bottom;
  assert(localBot < N, "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}

typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder;         // either an oop* or a narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()            { _holder = NULL; }
  operator oop*()       { return (oop*)_holder; }
  operator narrowOop*() {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtClass>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtClass> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtInternal>         RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtClass> RegionTaskQueueSet;


#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP