/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/owstTaskTerminator.hpp"
#include "logging/log.hpp"

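// Returns true if a worker that has offered termination should leave the
// termination protocol again, either because tasks have appeared in the queue
// set or because the external terminator asks for an early exit.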
bool OWSTTaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
  return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
}

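// A worker offers termination when it has run out of work. The last worker to
// arrive notifies all waiters and termination succeeds. Until then, one worker
// acts as the spin master and actively looks for new work while the remaining
// workers sleep on the _blocker monitor. A worker retracts its offer and
// returns false as soon as work becomes available again.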
bool OWSTTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  assert(_blocker != NULL, "Invariant");

  // Single worker, done
  if (_n_threads == 1) {
    _offered_termination = 1;
    assert(!peek_in_queue_set(), "Precondition");
    return true;
  }

  _blocker->lock_without_safepoint_check();
  _offered_termination++;
  // All arrived, done
  if (_offered_termination == _n_threads) {
    _blocker->notify_all();
    _blocker->unlock();
    assert(!peek_in_queue_set(), "Precondition");
    return true;
  }

  Thread* the_thread = Thread::current();
  while (true) {
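    // If nobody is spin master yet, volunteer: spin and peek for work on
    // behalf of the sleeping workers. Otherwise sleep on the monitor until
    // woken up or the sleep period expires.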
    if (_spin_master == NULL) {
      _spin_master = the_thread;

      _blocker->unlock();

      if (do_spin_master_work(terminator)) {
        assert(_offered_termination == _n_threads, "termination condition");
        assert(!peek_in_queue_set(), "Precondition");
        return true;
      } else {
        _blocker->lock_without_safepoint_check();
        // There is a possibility that termination was reached between dropping the lock
        // before returning from do_spin_master_work() and acquiring the lock above.
        if (_offered_termination == _n_threads) {
          _blocker->unlock();
          assert(!peek_in_queue_set(), "Precondition");
          return true;
        }
      }
    } else {
      _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);

      if (_offered_termination == _n_threads) {
        _blocker->unlock();
        assert(!peek_in_queue_set(), "Precondition");
        return true;
      }
    }

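    // New work may have become available, or the terminator may ask us to
    // leave: retract the termination offer and go back to work.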
    size_t tasks = tasks_in_queue_set();
    if (exit_termination(tasks, terminator)) {
      assert_lock_strong(_blocker);
      _offered_termination--;
      _blocker->unlock();
      return false;
    }
  }
}

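// Spin master loop: alternate between hard spins, yields and timed sleeps
// while watching for new work in the queue set or for all workers having
// offered termination. Returns true once all workers have offered termination;
// returns false when the spin-master role is given up, either because new work
// showed up or because another worker took over the role while this one slept.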
bool OWSTTaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
  uint yield_count = 0;
  // Number of hard spin loops done since last yield
  uint hard_spin_count = 0;
  // Number of iterations in the hard spin loop.
  uint hard_spin_limit = WorkStealingHardSpins;

  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
  // If it is greater than 0, then start with a small number
  // of spins and increase the number with each turn at spinning until
  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
  // Then do a yield() call and start spinning afresh.
  if (WorkStealingSpinToYieldRatio > 0) {
    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
    hard_spin_limit = MAX2(hard_spin_limit, 1U);
  }
  // Remember the initial spin limit.
  uint hard_spin_start = hard_spin_limit;

  // Loop waiting for all threads to offer termination or
  // more work.
  while (true) {
    // Look for more work.
    // Periodically sleep() instead of yield() to give threads
    // waiting on the cores a chance to grab this code.
    if (yield_count <= WorkStealingYieldsBeforeSleep) {
      // Do a yield or hard spin.  For purposes of deciding whether
      // to sleep, count this as a yield.
      yield_count++;

      // Periodically call yield() instead of spinning:
      // after WorkStealingSpinToYieldRatio spins, do a yield() call
      // and reset the counts and starting limit.
      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
        yield();
        hard_spin_count = 0;
        hard_spin_limit = hard_spin_start;
#ifdef TRACESPINNING
        _total_yields++;
#endif
      } else {
        // Hard spin this time
        // Increase the hard spinning period but only up to a limit.
        hard_spin_limit = MIN2(2*hard_spin_limit,
                               (uint) WorkStealingHardSpins);
        for (uint j = 0; j < hard_spin_limit; j++) {
          SpinPause();
        }
        hard_spin_count++;
#ifdef TRACESPINNING
        _total_spins++;
#endif
      }
    } else {
      log_develop_trace(gc, task)("OWSTTaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
                                  p2i(Thread::current()), yield_count);
      yield_count = 0;

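      // Too many yields without progress: give up the spin-master role while
      // sleeping on the blocker. If nobody claimed the role during the sleep,
      // take it back and continue spinning; otherwise return and let the
      // caller re-evaluate the termination state.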
      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      _spin_master = NULL;
      locker.wait(WorkStealingSleepMillis);
      if (_spin_master == NULL) {
        _spin_master = Thread::current();
      } else {
        return false;
      }
    }

#ifdef TRACESPINNING
    _total_peeks++;
#endif
    size_t tasks = tasks_in_queue_set();
    bool exit = exit_termination(tasks, terminator);
    {
      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      // Termination condition reached
      if (_offered_termination == _n_threads) {
        _spin_master = NULL;
        return true;
      } else if (exit) {
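        // Work became available (or an early exit was requested): wake the
        // sleeping workers. If there are enough tasks for all waiting workers,
        // wake them all; otherwise wake one worker per task beyond the one
        // the spin master will take itself.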
        if (tasks >= _offered_termination - 1) {
          locker.notify_all();
        } else {
          for (; tasks > 1; tasks--) {
            locker.notify();
          }
        }
        _spin_master = NULL;
        return false;
      }
    }
  }
}