/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.hpp"
#include "logging/log.hpp"
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0),
  _spin_master(NULL) {

  _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
}

TaskTerminator::~TaskTerminator() {
  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted");

  assert(_spin_master == NULL, "Should have been reset");
  assert(_blocker != NULL, "Can not be NULL");
  delete _blocker;
}

#ifdef ASSERT
bool TaskTerminator::peek_in_queue_set() {
  return _queue_set->peek();
}
#endif

void TaskTerminator::yield() {
  assert(_offered_termination <= _n_threads, "Invariant");
  os::naked_yield();
}

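// Prepare the terminator for the next round of work, e.g. another parallel
// phase over the same queue set. Only valid once the previous round has
// fully terminated (or the terminator has never been used).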
void TaskTerminator::reset_for_reuse() {
  if (_offered_termination != 0) {
    assert(_offered_termination == _n_threads,
           "Terminator may still be in use");
    _offered_termination = 0;
  }
}

void TaskTerminator::reset_for_reuse(uint n_threads) {
  reset_for_reuse();
  _n_threads = n_threads;
}

bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
  return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
}

size_t TaskTerminator::tasks_in_queue_set() const {
  return _queue_set->tasks();
}

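// Offer termination on behalf of the calling worker. Returns true once all
// workers have offered, i.e. the parallel phase is done; returns false if
// new work showed up, in which case the caller should resume processing.
//
// A typical worker loop might look like this (a sketch, not code from this
// file; drain_and_steal_tasks() is a hypothetical helper):
//
//   while (true) {
//     drain_and_steal_tasks();  // hypothetical worker-side processing
//     if (terminator.offer_termination(NULL)) {
//       break;    // all workers have offered: the phase is complete
//     }
//     // false: more tasks appeared in the queue set, keep working
//   }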
bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  assert(_blocker != NULL, "Invariant");

  // Single worker, done
  if (_n_threads == 1) {
    _offered_termination = 1;
    assert(!peek_in_queue_set(), "Precondition");
    return true;
  }

  _blocker->lock_without_safepoint_check();
  _offered_termination++;
  // All arrived, done
  if (_offered_termination == _n_threads) {
    _blocker->notify_all();
    _blocker->unlock();
    assert(!peek_in_queue_set(), "Precondition");
    return true;
  }

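  // Some workers are still running: compete for the spin-master role. The
  // spin master polls the queue set for new work; the other offerers sleep
  // on _blocker until notified or until their sleep times out.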
  Thread* the_thread = Thread::current();
  while (true) {
    if (_spin_master == NULL) {
      _spin_master = the_thread;

      _blocker->unlock();

      if (do_spin_master_work(terminator)) {
        assert(_offered_termination == _n_threads, "termination condition");
        assert(!peek_in_queue_set(), "Precondition");
        return true;
      } else {
        _blocker->lock_without_safepoint_check();
        // Termination may have been reached in the window between
        // do_spin_master_work() dropping the lock before returning and us
        // reacquiring it above.
        if (_offered_termination == _n_threads) {
          _blocker->unlock();
          assert(!peek_in_queue_set(), "Precondition");
          return true;
        }
      }
    } else {
      _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);

      if (_offered_termination == _n_threads) {
        _blocker->unlock();
        assert(!peek_in_queue_set(), "Precondition");
        return true;
      }
    }

    size_t tasks = tasks_in_queue_set();
    if (exit_termination(tasks, terminator)) {
      assert_lock_strong(_blocker);
      _offered_termination--;
      _blocker->unlock();
      return false;
    }
  }
}

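// Spin-master loop: alternate between hard spinning, yielding and sleeping
// while watching for new work and for the termination condition. Returns
// true if termination was reached, false if the spin master should exit
// termination (new work appeared or the terminator requested an exit) or if
// another thread took over the spin-master role while we slept.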
bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
  uint yield_count = 0;
  // Number of hard spin loops done since last yield
  uint hard_spin_count = 0;
  // Number of iterations in the hard spin loop.
  uint hard_spin_limit = WorkStealingHardSpins;

  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
  // If it is greater than 0, then start with a small number of spins and
  // increase that number with each turn at spinning until the count of hard
  // spins exceeds WorkStealingSpinToYieldRatio. Then do a yield() call and
  // start spinning afresh.
  if (WorkStealingSpinToYieldRatio > 0) {
    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
    hard_spin_limit = MAX2(hard_spin_limit, 1U);
  }
  // Remember the initial spin limit.
  uint hard_spin_start = hard_spin_limit;

  // Loop waiting for all threads to offer termination or for more work to
  // appear.
  while (true) {
    // Look for more work.
    // Periodically sleep() instead of yield() to give threads waiting on
    // the cores a chance to run.
    if (yield_count <= WorkStealingYieldsBeforeSleep) {
      // Do a yield or hardspin.  For purposes of deciding whether
      // to sleep, count this as a yield.
      yield_count++;

      // Periodically call yield() instead of spinning.
      // After WorkStealingSpinToYieldRatio spins, do a yield() call
      // and reset the counts and starting limit.
      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
        yield();
        hard_spin_count = 0;
        hard_spin_limit = hard_spin_start;
      } else {
        // Hard spin this time.
        // Increase the hard spinning period but only up to a limit.
        hard_spin_limit = MIN2(2*hard_spin_limit,
                               (uint) WorkStealingHardSpins);
        for (uint j = 0; j < hard_spin_limit; j++) {
          SpinPause();
        }
        hard_spin_count++;
      }
    } else {
      log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
                                  p2i(Thread::current()), yield_count);
      yield_count = 0;

      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      _spin_master = NULL;
      locker.wait(WorkStealingSleepMillis);
      if (_spin_master == NULL) {
        _spin_master = Thread::current();
      } else {
        return false;
      }
    }

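    // After spinning, yielding or sleeping, check whether new work has
    // appeared or whether all workers have offered termination by now.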
    size_t tasks = tasks_in_queue_set();
    bool exit = exit_termination(tasks, terminator);
    {
      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      // Termination condition reached
      if (_offered_termination == _n_threads) {
        _spin_master = NULL;
        return true;
      } else if (exit) {
        // Wake sleeping workers to help with the new tasks: all of them if
        // there is enough work for every waiter, otherwise about one per
        // available task (the exiting spin master takes one itself).
        if (tasks >= _offered_termination - 1) {
          locker.notify_all();
        } else {
          for (; tasks > 1; tasks--) {
            locker.notify();
          }
        }
        _spin_master = NULL;
        return false;
      }
    }
  }
}