/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "utilities/stack.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats()       { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t                    _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS
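
// Illustrative sketch (not part of the original header): aggregating the
// per-queue statistics into a single TaskQueueStats and printing a report, as
// a TASKQUEUE_STATS build might do at the end of a parallel phase.  The locals
// "queues" and "n" are hypothetical; print_header(), print() and operator+=
// are the members declared above.
//
//   TASKQUEUE_STATS_ONLY(
//     TaskQueueStats totals;
//     TaskQueueStats::print_header(1, tty); tty->cr();
//     TaskQueueStats::print_header(2, tty); tty->cr();
//     for (uint i = 0; i < n; ++i) {
//       queues[i]->stats.print(tty); tty->cr();   // one row per queue
//       totals += queues[i]->stats;
//     }
//     totals.print(tty); tty->cr();               // aggregate row
//   )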

template <unsigned int N>
class TaskQueueSuper: public CHeapObj {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)         { _data = data; }
    Age(const Age& age)          { _data = age._data; }
    Age(idx_t top, idx_t tag)    { _fields._top = top; _fields._tag = tag; }

    Age   get()        const volatile { return _data; }
    void  set(Age age) volatile       { _data = age._data; }

    idx_t top()        const volatile { return _fields._top; }
    idx_t tag()        const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent threads, so the owner thread can reset the state
    // to _bottom == _top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
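
  // Worked example (illustrative, not part of the original header), with
  // N = 16 and therefore MOD_N_MASK = 15: suppose _top == 2 and _bottom == 3,
  // i.e. the queue holds exactly one element, at index 2.  If the owner's
  // pop_local decrements _bottom to 2 while a thief's pop_global increments
  // _top to 3, a subsequent reader computes
  //
  //   dirty_size(2, 3) == (2 - 3) & 15 == 15 == N - 1
  //
  // which size() above maps to 0: the queue is reported empty, as required.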

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it contains none.
  bool peek()     const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static const uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};
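
// TASKQUEUE_SIZE is the power-of-two capacity used as the default template
// argument below.  Its definition is not present in this excerpt; the guarded
// value here is an assumption based on contemporaneous HotSpot sources and can
// be dropped if the macro is provided elsewhere in the build.
#ifndef TASKQUEUE_SIZE
#define TASKQUEUE_SIZE (1 << NOT_LP64(14) LP64_ONLY(17))
#endif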

template<class E, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N> {
protected:
  typedef typename TaskQueueSuper<N>::Age Age;
  typedef typename TaskQueueSuper<N>::idx_t idx_t;

  using TaskQueueSuper<N>::_bottom;
  using TaskQueueSuper<N>::_age;
  using TaskQueueSuper<N>::increment_index;
  using TaskQueueSuper<N>::decrement_index;
  using TaskQueueSuper<N>::dirty_size;

public:
  using TaskQueueSuper<N>::max_elems;
  using TaskQueueSuper<N>::size;
  TASKQUEUE_STATS_ONLY(using TaskQueueSuper<N>::stats;)

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};
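
// Illustrative sketch (not part of the original header): the intended split of
// operations between the owning thread and stealing threads.  "oop" is used as
// the element type; "task" and process() and the surrounding worker loop are
// hypothetical, while push(), pop_local() and pop_global() are the members
// declared above.
//
//   GenericTaskQueue<oop> q;
//   q.initialize();
//
//   // Owning thread only: the LIFO end.
//   if (!q.push(task)) {
//     // Queue full; caller must handle overflow (see OverflowTaskQueue below).
//   }
//   oop t;
//   while (q.pop_local(t)) {
//     process(t);            // may push further tasks onto q
//   }
//
//   // Any other thread: the FIFO end, usually via GenericTaskQueueSet::steal().
//   oop stolen;
//   if (q.pop_global(stolen)) {
//     process(stolen);
//   }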

template<class E, unsigned int N>
GenericTaskQueue<E, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, unsigned int N>
void GenericTaskQueue<E, N>::initialize() {
  _elems = NEW_C_HEAP_ARRAY(E, N);
}

template<class E, unsigned int N>
void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //            index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is unused.
    const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is empty
  // and top is greater than bottom.  Fix this representation of the empty queue
  // to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}

template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::pop_global(E& t) {
  Age oldAge = _age.get();
  uint localBot = _bottom;
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class E, unsigned int N>
GenericTaskQueue<E, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems);
}

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, N>
{
public:
  typedef Stack<E>               overflow_t;
  typedef GenericTaskQueue<E, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}
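
// Illustrative sketch (not part of the original header): a typical drain loop
// for an OverflowTaskQueue, emptying the private overflow stack alongside the
// shared task queue.  "task" and process() are hypothetical; push(),
// pop_overflow() and the inherited pop_local() are real members.
//
//   OverflowTaskQueue<oop> q;
//   q.initialize();
//   q.push(task);               // never fails; spills to the overflow stack
//
//   oop t;
//   while (q.pop_overflow(t) || q.pop_local(t)) {
//     process(t);               // may push further tasks onto q
//   }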

class TaskQueueSetSuper: public CHeapObj {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template<class T>
class GenericTaskQueueSet: public TaskQueueSetSuper {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_1_random(uint queue_num, int* seed, E& t);
  bool steal_best_of_2(uint queue_num, int* seed, E& t);
  bool steal_best_of_all(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};
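
// Illustrative sketch (not part of the original header): wiring a set of
// worker-private queues into one GenericTaskQueueSet so that idle workers can
// steal.  The worker numbering, seed choice, worker_queue() and process() are
// hypothetical; the set operations are the members declared above.
//
//   typedef OverflowTaskQueue<oop> WorkerQueue;
//   GenericTaskQueueSet<WorkerQueue>* qset =
//       new GenericTaskQueueSet<WorkerQueue>(n_workers);
//   for (uint i = 0; i < n_workers; ++i) {
//     qset->register_queue(i, worker_queue(i));   // one queue per worker
//   }
//
//   // In worker "id", once its own queue is empty:
//   int seed = 17 * (id + 1);                     // any per-thread seed
//   oop t;
//   while (qset->steal(id, &seed, t)) {
//     process(t);
//   }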

template<class T> void
GenericTaskQueueSet<T>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T> T*
GenericTaskQueueSet<T>::queue(uint i) {
  return _queues[i];
}

template<class T> bool
GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}

template<class T> bool
GenericTaskQueueSet<T>::steal_best_of_all(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    int best_k;
    uint best_sz = 0;
    for (uint k = 0; k < _n; k++) {
      if (k == queue_num) continue;
      uint sz = _queues[k]->size();
      if (sz > best_sz) {
        best_sz = sz;
        best_k = k;
      }
    }
    return best_sz > 0 && _queues[best_k]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    int k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T> bool
GenericTaskQueueSet<T>::steal_1_random(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k = queue_num;
    while (k == queue_num) k = randomParkAndMiller(seed) % _n;
    return _queues[k]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    int k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T> bool
GenericTaskQueueSet<T>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T>
bool GenericTaskQueueSet<T>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is a
  // set of work queues of other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If it returns "true", all threads are terminated.  If it
  // returns "false", available work has been observed in one of the task
  // queues, so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true. If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins() { return _total_spins; }
  static uint total_peeks() { return _total_peeks; }
  static void print_termination_counts();
#endif
};
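
// Illustrative sketch (not part of the original header): the usual shape of a
// worker's drain-and-terminate loop.  The shared terminator, the locals "q",
// "qset", "seed", "id" and process() are hypothetical; offer_termination() is
// the member declared above.
//
//   // Constructed once and shared by all workers:
//   ParallelTaskTerminator terminator(n_workers, qset);
//
//   // In each worker, with its own queue "q" and worker number "id":
//   oop t;
//   do {
//     while (q->pop_local(t) || qset->steal(id, &seed, t)) {
//       process(t);              // may refill q
//     }
//     // Out of local and stolen work; try to terminate.  offer_termination()
//     // returns false if work was observed in some queue of the set.
//   } while (!terminator.offer_termination());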

template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::push(E t) {
  uint localBot = _bottom;
  assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is unused.
    const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::pop_local(E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}

typedef GenericTaskQueue<oop>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void*  _holder;        // either an oop* or a narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

 public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p)       {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()             { _holder = NULL; }
  operator oop*()        { return (oop*)_holder; }
  operator narrowOop*()  {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
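
// Illustrative sketch (not part of the original header): how a producer and a
// consumer use the tag bit.  "field_addr" and the do_oop_work() helpers are
// hypothetical; the constructors, conversions and is_narrow() are real members.
//
//   OopStarTaskQueue q;          // OverflowTaskQueue<StarTask>, see below
//   q.initialize();
//
//   narrowOop* np = field_addr;  // address of a compressed-oop field (hypothetical)
//   q.push(StarTask(np));        // low bit of _holder is set for a narrowOop*
//
//   StarTask ref;
//   if (q.pop_local(ref)) {
//     if (ref.is_narrow()) {
//       do_oop_work((narrowOop*)ref);   // conversion strips the tag bit
//     } else {
//       do_oop_work((oop*)ref);
//     }
//   }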

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};
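
// Illustrative sketch (not part of the original header): ObjArrayTask records a
// resumption point inside a large object array so that scanning can proceed in
// chunks.  The chunk size, the queue "q" (an OverflowTaskQueue<ObjArrayTask>),
// scan_elements() and array_length() are hypothetical.
//
//   const int stride = 512;                      // elements scanned per chunk
//   ObjArrayTask task;
//   if (q.pop_local(task)) {
//     oop a     = task.obj();
//     int begin = task.index();
//     int end   = begin + stride;
//     scan_elements(a, begin, end);              // hypothetical helper
//     if (end < array_length(a)) {               // hypothetical helper
//       q.push(ObjArrayTask(a, end));            // resume from "end" later
//     }
//   }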

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t>             RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue>  RegionTaskQueueSet;


#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP