  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //               index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];  // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t)));
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty, which it will do here if this thread
// gets the last task, or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds; see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
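  // An illustrative trace of the ABA hazard the tag guards against
  // (a sketch only; the concrete index and tag values are made up):
  //   1. bottom == 1, _age == (top 0, tag T): one element, in _elems[0].
  //   2. A thief's pop_global() reads age (0, T) and _elems[0], then
  //      stalls before its CAS on _age.
  //   3. The owner pops the last element (this slow path publishes age
  //      (0, T+1)) and then pushes a new task, restoring bottom == 1 and
  //      top == 0, but _elems[0] now holds different work.
  //   4. The thief resumes: its CAS expects (0, T), finds (0, T+1), and
  //      fails, so it cannot claim the stale element it read in step 2.
  //      Without the tag bump the state would again be (0, T), the CAS
  //      would succeed, and the stale element would be returned.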
  ...

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty() const { return _overflow_stack.is_empty(); }
  inline bool is_empty() const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}

class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
  ...

  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins() { return _total_spins; }
  static uint total_peeks() { return _total_peeks; }
  static void print_termination_counts();
#endif
};

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
  uint localBot = _bottom;
  assert(localBot < N, "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}
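// A minimal sketch of the owner-side usage pattern (illustrative only:
// Task, process, queue, queue_set, worker_id, and seed are hypothetical
// names, and steal() is assumed to take (queue_num, seed, t) as suggested
// by randomParkAndMiller(int* seed0) above; none of this is declared in
// this file):
//
//   OverflowTaskQueue<Task, mtGC, N>* queue = ...;
//   Task t;
//   for (;;) {
//     // Drain private work first: the overflow stack, then the
//     // fixed-size deque.  pop_local() may only be used by the owner.
//     while (queue->pop_overflow(t) || queue->pop_local(t)) {
//       process(t);
//     }
//     // Out of local work: try to steal from a peer via the queue set,
//     // which applies pop_global() to the victim's queue.
//     if (!queue_set->steal(worker_id, &seed, t)) break;
//     process(t);
//   }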
template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an