/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_TASKQUEUE_HPP
#define SHARE_VM_GC_SHARED_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/ostream.hpp"
#include "utilities/stack.hpp"

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS
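// For illustration (not part of the original interface): TASKQUEUE_STATS_ONLY(code)
// compiles its argument only when TASKQUEUE_STATS is enabled, so statistics
// bookkeeping disappears entirely from product builds. A hypothetical call site
// might look like:
//
//   TASKQUEUE_STATS_ONLY(queue->stats.record_push());  // no-op when stats are off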

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats()       { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t                    _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS

// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)         { _data = data; }
    Age(const Age& age)          { _data = age._data; }
    Age(idx_t top, idx_t tag)    { _fields._top = top; _fields._tag = tag; }

    Age   get()        const volatile { return _data; }
    void  set(Age age) volatile       { _data = age._data; }

    idx_t top()        const volatile { return _fields._top; }
    idx_t tag()        const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile;

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state to
    // _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
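  // Worked example (illustrative, not in the original header): with N == 4,
  // _bottom == 0 and _age.top() == 1 (the raced state described above),
  // dirty_size() is (0 - 1) & 3 == 3 == N-1, which size() reports as 0,
  // i.e. the queue is treated as empty.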

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it contains none.
  bool peek()     const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static const uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

//
// GenericTaskQueue implements an ABP, Arora-Blumofe-Plaxton, double-
// ended-queue (deque), intended for use in work stealing. Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference to the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an
// implementation for weakly ordered memory models including (pseudo-)
// code containing memory barriers for a Chase-Lev deque. Chase-Lev is
// similar to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen, A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models.
// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
// practice of parallel programming (PPoPP 2013), 69-80.
//
template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if  TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed) as long as the number of entries exceeds the threshold.
  // If successful, returns true and sets t to the task; otherwise, returns false
  // (the queue is empty or the number of elements is below the threshold).
  inline bool pop_local(volatile E& t, uint threshold = 0);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(volatile E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply fn to each element in the task queue.  The queue must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn);

private:
  // Element array.
  volatile E* _elems;
};

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
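//
// A sketch of the intended push() behavior (illustrative; the real definition
// lives in the corresponding .inline.hpp implementation file, which this header
// does not show):
//
//   if (!taskqueue_t::push(t)) {        // bounded queue full?
//     overflow_stack()->push(t);        // spill to the unbounded overflow stack
//   }
//   return true;                        // hence push() always reports success
//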
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);
  // Try to push task t onto the queue only. Returns true if successful, false otherwise.
  inline bool try_push_to_taskqueue(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class BufferedOverflowTaskQueue: public OverflowTaskQueue<E, F, N>
{
public:
  typedef OverflowTaskQueue<E, F, N> taskqueue_t;

  BufferedOverflowTaskQueue() : _buf_empty(true) {};

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto:
  //   - first, try buffer;
  //   - then, try the queue;
  //   - then, overflow stack.
  // Return true.
  inline bool push(E t);

  // Attempt to pop from the buffer; return true if anything was popped.
  inline bool pop_buffer(E &t);

  inline void clear_buffer()  { _buf_empty = true; }
  inline bool buffer_empty()  const { return _buf_empty; }
  inline bool is_empty()        const {
    return taskqueue_t::is_empty() && buffer_empty();
  }

private:
  bool _buf_empty;
  E _elem;
};
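// Illustrative sketch of the push() cascade described above (the actual
// implementation is in the .inline.hpp file; this only suggests its likely shape,
// it is not a definition):
//
//   if (_buf_empty) {                         // 1. buffer is free: park t there
//     _elem = t; _buf_empty = false;
//   } else {                                  // 2. buffer occupied: move the buffered
//     taskqueue_t::push(_elem);               //    task to the queue (which itself
//     _elem = t;                              //    spills to the overflow stack),
//   }                                         //    then buffer t
//   return true;
//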

class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool   peek() = 0;
  virtual size_t tasks() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n);
  ~GenericTaskQueueSet();

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
  size_t tasks();

  uint size() const { return _n; }
};
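// Typical setup and use of a queue set for stealing (illustrative only; the worker
// identifiers, the per-thread seed and the surrounding code are hypothetical):
//
//   GenericTaskQueueSet<OopTaskQueue, mtGC>* qset =
//     new GenericTaskQueueSet<OopTaskQueue, mtGC>(n_workers);
//   for (uint i = 0; i < n_workers; i++) {
//     qset->register_queue(i, worker_queue(i));
//   }
//   ...
//   oop task;
//   int seed = 17;                              // per-thread seed for victim selection
//   if (qset->steal(worker_id, &seed, task)) {  // try to steal from some other queue
//     ... process task ...
//   }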

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

template<class T, MEMFLAGS F>
size_t GenericTaskQueueSet<T, F>::tasks() {
  size_t n = 0;
  for (uint j = 0; j < _n; j++) {
    n += _queues[j]->size();
  }
  return n;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
  virtual bool should_force_termination() { return false; }
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
protected:
  uint _n_threads;
  TaskQueueSetSuper* _queue_set;
  volatile uint _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is a
  // set of work queues of other threads.
  ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If it returns "true", all threads are terminated.  If it returns
  // "false", available work has been observed in one of the task queues,
  // so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  // If the terminator's should_force_termination() returns true, termination
  // is forced even if there is remaining work left.
  virtual bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(uint n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins() { return _total_spins; }
  static uint total_peeks() { return _total_peeks; }
  static void print_termination_counts();
#endif
};
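// A typical work-stealing drain loop built from these pieces (illustrative sketch;
// process(), worker_id, seed and the queue/terminator variables are hypothetical):
//
//   while (true) {
//     E t;
//     if (my_queue->pop_local(t) || queue_set->steal(worker_id, &seed, t)) {
//       process(t);
//     } else if (terminator.offer_termination()) {
//       break;   // every worker offered termination and all queues appeared empty
//     }
//     // offer_termination() returned false: work reappeared somewhere, retry
//   }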

typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void*  _holder;        // either an oop* or a narrowOop*, tagged by the low bit

  enum { COMPRESSED_OOP_MASK = 1 };

 public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p)       {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()             { _holder = NULL; }
  operator oop*()        { return (oop*)_holder; }
  operator narrowOop*()  {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
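// Example of consuming a StarTask (illustrative; the queue and the processing code
// are hypothetical): the constructors tag a narrowOop* with the low bit, and
// is_narrow() tells the consumer which conversion to use.
//
//   StarTask task;
//   if (queue->pop_local(task)) {
//     if (task.is_narrow()) {
//       narrowOop* p = task;    // conversion masks the tag bit off
//       ... process compressed oop slot ...
//     } else {
//       oop* p = task;
//       ... process full-width oop slot ...
//     }
//   }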

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

// ObjArrayChunkedTask
//
// Encodes both regular oops, and the array oops plus chunking data for parallel array processing.
// The design goal is to make the regular oop ops very fast, because that would be the prevailing
// case. On the other hand, it should not block parallel array processing from efficiently dividing
// the array work.
//
// The idea is to steal the bits from the 64-bit oop to encode array data, if needed. For the
// proper divide-and-conquer strategies, we want to encode the "blocking" data. It turns out, the
// most efficient way to do this is to encode the array block as (chunk * 2^pow), where it is assumed
// that the block has the size of 2^pow. This requires pow to have only 5 bits (2^32) to encode
// all possible arrays.
//
//    |---------oop---------|-pow-|--chunk---|
//    0                    49     54        64
//
// By definition, chunk == 0 means "no chunk", i.e. chunking starts from 1.
//
// This encoding gives a few interesting benefits:
//
// a) Encoding/decoding regular oops is very simple, because the upper bits are zero in that task:
//
//    |---------oop---------|00000|0000000000| // no chunk data
//
//    This helps the most ubiquitous path. The initialization amounts to putting the oop into the word
//    with zero padding. Testing for "chunkedness" is testing for zero with the chunk mask.
//
// b) Splitting tasks for divide-and-conquer is possible. Suppose we have chunk <C, P> that covers
//    interval [ (C-1)*2^P; C*2^P ). We can then split it into two chunks:
//      <2*C - 1, P-1>, that covers interval [ (2*C - 2)*2^(P-1); (2*C - 1)*2^(P-1) )
//      <2*C, P-1>,     that covers interval [ (2*C - 1)*2^(P-1);       2*C*2^(P-1) )
//
//    Observe that the union of these two intervals is:
//      [ (2*C - 2)*2^(P-1); 2*C*2^(P-1) )
//
//    ...which is the original interval:
//      [ (C-1)*2^P; C*2^P )
//
// c) The divide-and-conquer strategy could even start with chunk <1, round-log2-len(arr)>, and split
//    down in the parallel threads, which alleviates the upfront (serial) splitting costs.
//
// Encoding limitations caused by current bitscales mean:
//    10 bits for chunk: max 1024 blocks per array
//     5 bits for power: max 2^32 array
//    49 bits for   oop: max 512 TB of addressable space
//
// Stealing bits from the oop trims down the addressable space. Stealing too few bits for chunk ID limits
// potential parallelism. Stealing too few bits for pow limits the maximum array size that can be handled.
// In future, these might be rebalanced to favor one degree of freedom against another. For example,
// if/when Arrays 2.0 brings 2^64-sized arrays, we might need to steal another bit for power. We could regain
// some bits back if chunks are counted in ObjArrayMarkingStride units.
//
// There is also a fallback version that uses plain fields, when we don't have enough space to steal the
// bits from the native pointer. It is useful for debugging the _LP64 version.
//
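// Worked example of the splitting rule above (illustrative): chunk <C, P> = <3, 4>
// covers [ 2*16; 3*16 ) = [32; 48). Splitting gives
//   <2*3 - 1, 3> = <5, 3>, covering [ 4*8; 5*8 ) = [32; 40), and
//   <2*3,     3> = <6, 3>, covering [ 5*8; 6*8 ) = [40; 48),
// whose union is exactly the original interval [32; 48).
//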
#ifdef _LP64
class ObjArrayChunkedTask
{
public:
  enum {
    chunk_bits   = 10,
    pow_bits     = 5,
    oop_bits     = sizeof(uintptr_t)*8 - chunk_bits - pow_bits,
  };
  enum {
    oop_shift    = 0,
    pow_shift    = oop_shift + oop_bits,
    chunk_shift  = pow_shift + pow_bits,
  };

public:
  ObjArrayChunkedTask(oop o = NULL) {
    _obj = ((uintptr_t)(void*) o) << oop_shift;
  }
  ObjArrayChunkedTask(oop o, int chunk, int mult) {
    assert(0 <= chunk && chunk < nth_bit(chunk_bits), "chunk is sane: %d", chunk);
    assert(0 <= mult && mult < nth_bit(pow_bits), "pow is sane: %d", mult);
    uintptr_t t_b = ((uintptr_t) chunk) << chunk_shift;
    uintptr_t t_m = ((uintptr_t) mult) << pow_shift;
    uintptr_t obj = (uintptr_t)(void*)o;
    assert(obj < nth_bit(oop_bits), "obj ref is sane: " PTR_FORMAT, obj);
    intptr_t t_o = obj << oop_shift;
    _obj = t_o | t_m | t_b;
  }
  ObjArrayChunkedTask(const ObjArrayChunkedTask& t): _obj(t._obj) { }

  ObjArrayChunkedTask& operator =(const ObjArrayChunkedTask& t) {
    _obj = t._obj;
    return *this;
  }
  volatile ObjArrayChunkedTask&
  operator =(const volatile ObjArrayChunkedTask& t) volatile {
    (void)const_cast<uintptr_t&>(_obj = t._obj);
    return *this;
  }

  inline oop obj()   const { return (oop) reinterpret_cast<void*>((_obj >> oop_shift) & right_n_bits(oop_bits)); }
  inline int chunk() const { return (int) (_obj >> chunk_shift) & right_n_bits(chunk_bits); }
  inline int pow()   const { return (int) ((_obj >> pow_shift) & right_n_bits(pow_bits)); }
  inline bool is_not_chunked() const { return (_obj & ~right_n_bits(oop_bits + pow_bits)) == 0; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

  static size_t max_addressable() {
    return nth_bit(oop_bits);
  }

  static int chunk_size() {
    return nth_bit(chunk_bits);
  }

private:
  uintptr_t _obj;
};
#else
class ObjArrayChunkedTask
{
public:
  enum {
    chunk_bits  = 10,
    pow_bits    = 5,
  };
public:
  ObjArrayChunkedTask(oop o = NULL, int chunk = 0, int pow = 0): _obj(o) {
    assert(0 <= chunk && chunk < nth_bit(chunk_bits), "chunk is sane: %d", chunk);
    assert(0 <= pow && pow < nth_bit(pow_bits), "pow is sane: %d", pow);
    _chunk = chunk;
    _pow = pow;
  }
  ObjArrayChunkedTask(const ObjArrayChunkedTask& t): _obj(t._obj), _chunk(t._chunk), _pow(t._pow) { }

  ObjArrayChunkedTask& operator =(const ObjArrayChunkedTask& t) {
    _obj = t._obj;
    _chunk = t._chunk;
    _pow = t._pow;
    return *this;
  }
  volatile ObjArrayChunkedTask&
  operator =(const volatile ObjArrayChunkedTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _chunk = t._chunk;
    _pow = t._pow;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int chunk() const { return _chunk; }
  inline int pow()   const { return _pow; }

  inline bool is_not_chunked() const { return _chunk == 0; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

  static size_t max_addressable() {
    return sizeof(oop);
  }

  static int chunk_size() {
    return nth_bit(chunk_bits);
  }

private:
  oop _obj;
  int _chunk;
  int _pow;
};
#endif

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtGC>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtGC> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtGC>             RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtGC>  RegionTaskQueueSet;

#endif // SHARE_VM_GC_SHARED_TASKQUEUE_HPP