/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_TASKQUEUE_HPP
#define SHARE_VM_GC_SHARED_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/ostream.hpp"
#include "utilities/stack.hpp"

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS
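
// Illustrative sketch (not part of the original header): TASKQUEUE_STATS_ONLY
// compiles its argument away entirely when TASKQUEUE_STATS is 0, so stats
// bookkeeping can sit on hot paths at zero cost in release builds.  A push
// path might record its statistic like this:
//
//   bool pushed = queue->push(task);
//   TASKQUEUE_STATS_ONLY(queue->stats.record_push());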

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats()       { reset(); }

  inline void record_push()          { ++_stats[push]; }
  inline void record_pop()           { ++_stats[pop]; }
  inline void record_pop_slow()      { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal_attempt() { ++_stats[steal_attempt]; }
  inline void record_steal()         { ++_stats[steal]; }
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t                    _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

inline void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

inline void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS
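
// Illustrative reporting sketch (an assumption about typical use, not part of
// this header): per-queue stats can be accumulated with operator+= and dumped
// through print_header()/print(), e.g. at the end of a parallel phase.  The
// queue(i) accessor and the number of header lines are assumptions here:
//
//   TaskQueueStats totals;
//   for (uint i = 0; i < n_queues; i++) {
//     totals += queue(i)->stats;
//   }
//   TaskQueueStats::print_header(1, tty);
//   TaskQueueStats::print_header(2, tty);
//   totals.print(tty);
//   totals.reset();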

// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)         { _data = data; }
    Age(const Age& age)          { _data = age._data; }
    Age(idx_t top, idx_t tag)    { _fields._top = top; _fields._tag = tag; }

    Age   get()        const volatile { return _data; }
    void  set(Age age) volatile       { _data = age._data; }

    idx_t top()        const volatile { return _fields._top; }
    idx_t tag()        const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile;

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };
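
  // Illustrative note (not part of the original header): the union above packs
  // (_top, _tag) into one machine word so the pair can be read and CASed
  // atomically through _data.  On LP64, idx_t is uint32_t and the two fields
  // fill the 64-bit size_t exactly (the constructor of GenericTaskQueue
  // asserts sizeof(Age) == sizeof(size_t)).  The tag counts wrap-arounds of
  // top, which is what lets pop_global's cmpxchg detect ABA-style
  // interference:
  //
  //   Age a(/* top */ N - 1, /* tag */ 7);
  //   a.increment();   // top wraps to 0, so tag becomes 8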

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state to
    // _bottom == _top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
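
  // Worked example of the race above (illustrative, with a hypothetically
  // small N = 4, so MOD_N_MASK = 3): start from _bottom == 2, _top == 1,
  // i.e. one element.  A racing pop_local decrements _bottom to 1 while a
  // racing pop_global increments _top to 2.  Now dirty_size(1, 2) ==
  // (1 - 2) & 3 == 3 == N-1, which size() maps to 0: the queue is correctly
  // seen as empty.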

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it contains none.
  bool peek()     const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size: one slot is needed to keep a full queue
  // distinguishable from an empty one, and (see the note in size() above)
  // a dirty_size of N-1 must remain interpretable as an empty queue.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

//
// GenericTaskQueue implements an ABP, Arora-Blumofe-Plaxton, double-
// ended-queue (deque), intended for use in work stealing. Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference to the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an implementation
// for weakly ordered memory models, including (pseudo-)code with the
// memory barriers required for a Chase-Lev deque. Chase-Lev is similar
// to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models.
// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
// practice of parallel programming (PPoPP 2013), 69-80.
//
template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed) as long as the number of entries exceeds the threshold.
  // If successful, returns true and sets t to the task; otherwise, returns
  // false (the queue is empty or the number of elements is below the
  // threshold).
  inline bool pop_local(volatile E& t, uint threshold = 0);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(volatile E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply fn to each element in the task queue.  The queue must not
  // be modified while iterating.
  template<typename Fn> void iterate(Fn fn);

private:
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
  // Element array.
  volatile E* _elems;

  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(E*));
  // Queue owner local variables. Not to be accessed by other threads.

  static const uint InvalidQueueId = uint(-1);
  uint _last_stolen_queue_id; // The id of the queue we last stole from

  int _seed; // Current random seed used for selecting a random queue during stealing.

  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(uint) + sizeof(int));
public:
  int next_random_queue_id();

  void set_last_stolen_queue_id(uint id)     { _last_stolen_queue_id = id; }
  uint last_stolen_queue_id() const          { return _last_stolen_queue_id; }
  bool is_last_stolen_queue_id_valid() const { return _last_stolen_queue_id != InvalidQueueId; }
  void invalidate_last_stolen_queue_id()     { _last_stolen_queue_id = InvalidQueueId; }
};

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() : _last_stolen_queue_id(InvalidQueueId), _seed(17 /* random number */) {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}
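
// Illustrative usage sketch (not part of this header; OopTaskQueue is the
// typedef defined near the end of this file).  The owner thread treats the
// queue as a LIFO stack via push()/pop_local(), while other threads take
// from the opposite end via pop_global():
//
//   OopTaskQueue* q = new OopTaskQueue();
//   q->initialize();                  // allocates the element array
//
//   // Owner thread:
//   if (!q->push(task)) { /* queue full: caller must handle overflow */ }
//   oop t;
//   while (q->pop_local(t)) { /* process t, possibly pushing more work */ }
//
//   // Stealing thread:
//   oop stolen;
//   if (q->pop_global(stolen)) { /* process stolen */ }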

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or, if the queue is full, onto the overflow
  // stack.  Always returns true.
  inline bool push(E t);
  // Try to push task t onto the queue only. Returns true if successful, false otherwise.
  inline bool try_push_to_taskqueue(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};
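
// Illustrative drain pattern (a sketch, not part of this header): because
// push() never fails, a consumer must drain both backing stores.  The
// overflow stack is not part of the stealing protocol, so only the owner
// can empty it, typically before resuming local pops:
//
//   OopStarTaskQueue* q = ...;       // typedef near the end of this file
//   StarTask t;
//   do {
//     while (q->pop_overflow(t)) { /* process t */ }
//     while (q->pop_local(t))    { /* process t */ }
//   } while (!q->is_empty());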

class TaskQueueSetSuper {
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
public:
  typedef typename T::element_type E;

private:
  uint _n;
  T** _queues;

  bool steal_best_of_2(uint queue_num, E& t);

public:
  GenericTaskQueueSet(uint n);
  ~GenericTaskQueueSet();

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // Try to steal a task from a queue other than queue_num. May perform
  // several attempts at doing so.  Returns true if stealing succeeds, and
  // sets "t" to the stolen task.
  bool steal(uint queue_num, E& t);

  bool peek();

  uint size() const { return _n; }
};

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}
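
// Illustrative setup sketch (not part of this header): a typical client
// arranges one queue per worker thread, registered in a shared set; worker i
// drains its own queue and falls back to steal() when it runs dry:
//
//   OopTaskQueueSet* qset = new OopTaskQueueSet(n_workers);
//   for (uint i = 0; i < n_workers; i++) {
//     OopTaskQueue* q = new OopTaskQueue();
//     q->initialize();
//     qset->register_queue(i, q);
//   }
//
//   // In worker i, once the local queue is empty:
//   oop t;
//   while (qset->steal(i, t)) { /* process t, push follow-up work locally */ }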

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  uint _n_threads;
  TaskQueueSetSuper* _queue_set;
  volatile uint _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is
  // the set of work queues of the other threads.
  ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If it returns "true", all threads are terminated.  If it
  // returns "false", available work has been observed in one of the task
  // queues, so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true. If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(uint n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins() { return _total_spins; }
  static uint total_peeks() { return _total_peeks; }
  static void print_termination_counts();
#endif
};
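
// Illustrative worker loop (a sketch, not part of this header) combining
// local draining, stealing, and the termination protocol:
//
//   void do_work(uint worker_id, OopTaskQueueSet* qset,
//                ParallelTaskTerminator* terminator) {
//     OopTaskQueue* q = qset->queue(worker_id);
//     oop t;
//     do {
//       while (q->pop_local(t) || qset->steal(worker_id, t)) {
//         /* process t; processing may push more tasks onto q */
//       }
//       // No local or stealable work found; offer to terminate.  If some
//       // other thread still has work, offer_termination() returns false
//       // and we loop back to try stealing again.
//     } while (!terminator->offer_termination());
//   }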

typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void*  _holder;        // either an oop* or a narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

 public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p)       {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()             { _holder = NULL; }
  operator oop*()        { return (oop*)_holder; }
  operator narrowOop*()  {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
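
// Illustrative consumer sketch (not part of this header): StarTask borrows
// the low bit of the pointer as a type tag, which is safe because both
// pointer kinds are at least 2-byte aligned (the constructors assert the
// bit is clear).  A consumer dispatches on is_narrow(); q is assumed to be
// an OopStarTaskQueue* (typedef later in this file):
//
//   StarTask task;
//   if (q->pop_local(task)) {
//     if (task.is_narrow()) {
//       narrowOop* p = task;   // conversion operator strips the tag bit
//       /* process the compressed-oop slot p */
//     } else {
//       oop* p = task;
//       /* process the full-width oop slot p */
//     }
//   }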

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};
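
// Illustrative sketch (an assumption about intended use, not stated in this
// header): an (obj, index) pair lets a large object array be scanned in
// bounded chunks, re-pushing a task for the remainder so other threads can
// steal it.  Here stride and length_of() are hypothetical stand-ins for the
// chunk size and the array-length lookup:
//
//   const int stride = 128;                     // hypothetical chunk size
//   ObjArrayTask task;
//   if (q->pop_local(task)) {
//     int beg = task.index();
//     int end = MIN2(beg + stride, length_of(task.obj()));
//     if (end < length_of(task.obj())) {
//       q->push(ObjArrayTask(task.obj(), end)); // remainder stays stealable
//     }
//     /* scan elements [beg, end) of task.obj() */
//   }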

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtGC>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtGC> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtGC>             RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtGC>  RegionTaskQueueSet;

#endif // SHARE_VM_GC_SHARED_TASKQUEUE_HPP