/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "utilities/stack.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "orderAccess_aix_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats() { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age  get()        const volatile { return _data; }
    void set(Age age)       volatile { _data = age._data; }

    idx_t top() const volatile { return _fields._top; }
    idx_t tag() const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
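    // (Illustrative note: the tag guards the cmpxchg on _age against ABA.
    // If top wraps back to a previously seen value between a stealer's load
    // of _age and its CAS, the bumped tag makes the packed Age value compare
    // unequal, so the stale CAS fails.  For example, Age(0, 1) and Age(0, 0)
    // share the same top but have different _data, so a CAS expecting the
    // old Age does not succeed.)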
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent threads, so the owner thread can reset the state
    // to _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks; is_empty()
  // returns true if it contains none.
  bool peek() const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.  size()
  // treats the ambiguous "N-1" state left behind by a pop_local/pop_global
  // race as empty; dirty_size() returns the raw index difference and so
  // admits that race state.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size; the note below explains why.
  uint max_elems() const { return N - 2; }
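
  // (Why N - 2 rather than N - 1: dirty_size() == N - 1 is reserved to
  // encode the "emptied by a pop_local/pop_global race" state, which size()
  // reports as 0.  Illustrative example, assuming N == 8: a queue holding
  // 7 elements would have dirty_size() == 7 == N - 1 and would be
  // indistinguishable from that raced-empty state, so capacity is capped
  // at 6 == N - 2.)
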
  // Total size of queue.
  static uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
  ArrayAllocator<E, F> _array_allocator;
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::initialize() {
  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //               index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}
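
// (Note on the fast/slow path split: push(), defined near the end of this
// file, handles the common case, dirty_n_elems < max_elems().  push_slow()
// below handles the one remaining legal state, dirty_n_elems == N - 1,
// which really encodes an empty queue left behind by a pop_local/pop_global
// race, so the push can safely proceed.)
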
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is
  // empty and top is greater than bottom.  Fix this representation of the
  // empty queue to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(E& t) {
  Age oldAge = _age.get();
  uint localBot = _bottom;
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems, F);
}
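
// Usage sketch (illustrative only; not part of this file's API surface):
//
//   GenericTaskQueue<oop, mtGC>* q = new GenericTaskQueue<oop, mtGC>();
//   q->initialize();
//   oop task = ...;                  // hypothetical task
//   if (!q->push(task)) {
//     // queue full; the caller must handle overflow itself
//   }
//   oop t;
//   if (q->pop_local(t))  { /* owner thread dequeues, LIFO end */ }
//   if (q->pop_global(t)) { /* stealing thread dequeues, FIFO end */ }
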
// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}

class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);
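
  // (Illustrative note: steal_best_of_2(), defined below, implements a
  // "power of two choices" victim-selection policy: sample two other
  // queues at random, using randomParkAndMiller() above, and steal from
  // the one that currently looks longer.  This tends to balance queue
  // lengths better than a single random probe would.)
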
  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is
  // the set of work queues used by those threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If it returns "true", all threads are terminated.  If it
  // returns "false", available work has been observed in one of the task
  // queues, so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);
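
  // Usage sketch (illustrative; a typical parallel worker loop):
  //
  //   do {
  //     while (/* local work remains, or a steal succeeds */) {
  //       // ... process tasks ...
  //     }
  //   } while (!terminator->offer_termination());
  //   // All workers have offered termination; the parallel phase is done.
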
  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins()  { return _total_spins; }
  static uint total_peeks()  { return _total_peeks; }
  static void print_termination_counts();
#endif
};

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
  uint localBot = _bottom;
  assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}
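
// (Illustrative note on the fence in pop_local(): without it, the re-read
// of _age.top() could be reordered ahead of the store to _bottom.  A
// concurrent pop_global() could then claim the same element this thread is
// about to return, because neither thread would observe the other's update
// in time.  The full fence orders the _bottom store before the re-read of
// top, so at most one side can believe it owns the contested element.)
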
typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void*  _holder;        // either an oop* or a narrowOop*, tagged by the low bit

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()             { _holder = NULL; }
  operator oop*()        { return (oop*)_holder; }
  operator narrowOop*()  {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtClass>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtClass> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtInternal>         RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtClass> RegionTaskQueueSet;


#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP