/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/taskqueue.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/stack.inline.hpp"

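// Diagnostic counters shared by all ParallelTaskTerminator instances when
// TRACESPINNING is defined.  They are updated in offer_termination() and
// reported by print_termination_counts().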
#ifdef TRACESPINNING
uint ParallelTaskTerminator::_total_yields = 0;
uint ParallelTaskTerminator::_total_spins = 0;
uint ParallelTaskTerminator::_total_peeks = 0;
#endif

#if TASKQUEUE_STATS
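// Column labels for the collected statistics.  The array is indexed by
// TaskQueueStats::StatId, so the order here has to follow the StatId enum
// declared in taskqueue.hpp.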
const char * const TaskQueueStats::_names[last_stat_id] = {
  "qpush", "qpop", "qpop-s", "qattempt", "qsteal", "opush", "omax"
};

TaskQueueStats & TaskQueueStats::operator +=(const TaskQueueStats & addend)
{
  for (unsigned int i = 0; i < last_stat_id; ++i) {
    _stats[i] += addend._stats[i];
  }
  return *this;
}

void TaskQueueStats::print_header(unsigned int line, outputStream* const stream,
                                  unsigned int width)
{
  // Use a width w: 1 <= w <= max_width
  const unsigned int max_width = 40;
  const unsigned int w = MAX2(MIN2(width, max_width), 1U);

  if (line == 0) { // spaces equal in width to the header
    const unsigned int hdr_width = w * last_stat_id + last_stat_id - 1;
    stream->print("%*s", hdr_width, " ");
  } else if (line == 1) { // labels
    stream->print("%*s", w, _names[0]);
    for (unsigned int i = 1; i < last_stat_id; ++i) {
      stream->print(" %*s", w, _names[i]);
    }
  } else if (line == 2) { // dashed lines
    char dashes[max_width + 1];
    memset(dashes, '-', w);
    dashes[w] = '\0';
    stream->print("%s", dashes);
    for (unsigned int i = 1; i < last_stat_id; ++i) {
      stream->print(" %s", dashes);
    }
  }
}

void TaskQueueStats::print(outputStream* stream, unsigned int width) const
{
  #define FMT SIZE_FORMAT_W(*)
  stream->print(FMT, width, _stats[0]);
  for (unsigned int i = 1; i < last_stat_id; ++i) {
    stream->print(" " FMT, width, _stats[i]);
  }
  #undef FMT
}

#ifdef ASSERT
// Invariants which should hold after a TaskQueue has been emptied and is
// quiescent; they do not hold at arbitrary times.
void TaskQueueStats::verify() const
{
  assert(get(push) == get(pop) + get(steal),
         "push=" SIZE_FORMAT " pop=" SIZE_FORMAT " steal=" SIZE_FORMAT,
         get(push), get(pop), get(steal));
  assert(get(pop_slow) <= get(pop),
         "pop_slow=" SIZE_FORMAT " pop=" SIZE_FORMAT,
         get(pop_slow), get(pop));
  assert(get(steal) <= get(steal_attempt),
         "steal=" SIZE_FORMAT " steal_attempt=" SIZE_FORMAT,
         get(steal), get(steal_attempt));
  assert(get(overflow) == 0 || get(push) != 0,
         "overflow=" SIZE_FORMAT " push=" SIZE_FORMAT,
         get(overflow), get(push));
  assert(get(overflow_max_len) == 0 || get(overflow) != 0,
         "overflow_max_len=" SIZE_FORMAT " overflow=" SIZE_FORMAT,
         get(overflow_max_len), get(overflow));
}
#endif // ASSERT
#endif // TASKQUEUE_STATS

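// Park-Miller "minimal standard" pseudo-random number generator:
// seed' = (16807 * seed) mod (2^31 - 1), evaluated with Schrage's
// decomposition (the q/r constants below) so that the intermediate
// product never overflows a signed 32-bit int.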
int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
  const int a =      16807;
  const int m = 2147483647;
  const int q =     127773;  /* m div a */
  const int r =       2836;  /* m mod a */
  assert(sizeof(int) == 4, "I think this relies on that");
  int seed = *seed0;
  int hi   = seed / q;
  int lo   = seed % q;
  int test = a * lo - r * hi;
  if (test > 0)
    seed = test;
  else
    seed = test + m;
  *seed0 = seed;
  return seed;
}

ParallelTaskTerminator::
ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0) {}

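// Return true if any queue in the set still holds a task, i.e. more work
// may be available for this thread to steal.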
bool ParallelTaskTerminator::peek_in_queue_set() {
  return _queue_set->peek();
}

void ParallelTaskTerminator::yield() {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::naked_yield();
}

void ParallelTaskTerminator::sleep(uint millis) {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::sleep(Thread::current(), millis, false);
}

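// A worker calls offer_termination() once it has run out of work.  The offer
// is recorded by incrementing _offered_termination; the caller then spins,
// yields and eventually sleeps until either all _n_threads workers have
// offered termination (return true), or more work shows up in the queue set
// or the TerminatorTerminator asks to exit (withdraw the offer, return false).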
bool
ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  Atomic::inc((int *)&_offered_termination);

  uint yield_count = 0;
  // Number of hard spin loops done since last yield
  uint hard_spin_count = 0;
  // Number of iterations in the hard spin loop.
  uint hard_spin_limit = WorkStealingHardSpins;

  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
  // If it is greater than 0, then start with a small number
  // of spins and increase the number with each turn at spinning until
  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
  // Then do a yield() call and start spinning afresh.
  if (WorkStealingSpinToYieldRatio > 0) {
    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
    hard_spin_limit = MAX2(hard_spin_limit, 1U);
  }
  // Remember the initial spin limit.
  uint hard_spin_start = hard_spin_limit;

  // Loop waiting for all threads to offer termination or
  // more work.
  while (true) {
    assert(_offered_termination <= _n_threads, "Invariant");
    // Are all threads offering termination?
    if (_offered_termination == _n_threads) {
      return true;
    } else {
      // Look for more work.
      // Periodically sleep() instead of yield() to give threads
      // waiting for a core the chance to run.
      if (yield_count <= WorkStealingYieldsBeforeSleep) {
        // Do a yield or hardspin.  For purposes of deciding whether
        // to sleep, count this as a yield.
        yield_count++;

        // Periodically call yield() instead of spinning.
        // After WorkStealingSpinToYieldRatio spins, do a yield() call
        // and reset the counts and starting limit.
        if (hard_spin_count > WorkStealingSpinToYieldRatio) {
          yield();
          hard_spin_count = 0;
          hard_spin_limit = hard_spin_start;
#ifdef TRACESPINNING
          _total_yields++;
#endif
        } else {
          // Hard spin this time.
          // Increase the hard spinning period but only up to a limit.
          hard_spin_limit = MIN2(2*hard_spin_limit,
                                 (uint) WorkStealingHardSpins);
          for (uint j = 0; j < hard_spin_limit; j++) {
            SpinPause();
          }
          hard_spin_count++;
#ifdef TRACESPINNING
          _total_spins++;
#endif
        }
      } else {
        log_develop_trace(gc, task)("ParallelTaskTerminator::offer_termination() thread " PTR_FORMAT " sleeps after %u yields",
                                    p2i(Thread::current()), yield_count);
        yield_count = 0;
        // A sleep will cause this processor to seek work on another processor's
        // runqueue, if it has nothing else to run (as opposed to the yield
        // which may only move the thread to the end of this processor's
        // runqueue).
        sleep(WorkStealingSleepMillis);
      }

#ifdef TRACESPINNING
      _total_peeks++;
#endif
      if (peek_in_queue_set() ||
          (terminator != NULL && terminator->should_exit_termination())) {
        Atomic::dec((int *)&_offered_termination);
        assert(_offered_termination < _n_threads, "Invariant");
        return false;
      }
    }
  }
}

#ifdef TRACESPINNING
void ParallelTaskTerminator::print_termination_counts() {
  log_trace(gc, task)("ParallelTaskTerminator Total yields: %u"
    " Total spins: %u Total peeks: %u",
    total_yields(),
    total_spins(),
    total_peeks());
}
#endif

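// Prepare the terminator for another round of termination.  If the previous
// round was used at all, every worker must have offered termination before
// the terminator can be reset.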
void ParallelTaskTerminator::reset_for_reuse() {
  if (_offered_termination != 0) {
    assert(_offered_termination == _n_threads,
           "Terminator may still be in use");
    _offered_termination = 0;
  }
}

#ifdef ASSERT
bool ObjArrayTask::is_valid() const {
  return _obj != NULL && _obj->is_objArray() && _index >= 0 &&
      _index < objArrayOop(_obj)->length();
}
#endif // ASSERT

void ParallelTaskTerminator::reset_for_reuse(uint n_threads) {
  reset_for_reuse();
  _n_threads = n_threads;
}