/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/owstTaskTerminator.hpp"
#include "logging/log.hpp"

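// OWST stands for Optimized Work Stealing Threads: instead of every idle
// worker spinning while it waits for termination, a single thread, the spin
// master, spins and peeks at the task queues on behalf of the others, which
// sleep on the _blocker monitor. This keeps the termination protocol cheap
// when many GC workers are running.
//
// Minimal sketch of how a worker drives this terminator (the queue and steal
// helpers below are illustrative, not part of this class):
//
//   do {
//     while (queue->pop_local(task) || steal_work(task)) {
//       process(task);
//     }
//   } while (!terminator->offer_termination()); // false: new work appeared
//   // true: all workers offered termination, the parallel phase is over

// Termination should be abandoned when the task queue set is non-empty or
// the external terminator condition asks us to exit.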
bool OWSTTaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
  return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
}

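// Offer termination on behalf of the current worker. Returns true when all
// _n_threads workers have offered termination, and false when more work
// showed up and the caller should go back to draining and stealing tasks.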
bool OWSTTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  assert(_blocker != NULL, "Invariant");

  // Single worker, done.
  if (_n_threads == 1) {
    return true;
  }

  _blocker->lock_without_safepoint_check();
  _offered_termination++;
  // If this was the last worker to offer termination, wake the sleepers; done.
  if (_offered_termination == _n_threads) {
    _blocker->notify_all();
    _blocker->unlock();
    return true;
  }

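  // Not all workers have arrived yet: each thread either claims the vacant
  // spin-master role or sleeps on the blocker until woken or timed out.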
  Thread* the_thread = Thread::current();
  while (true) {
    if (_spin_master == NULL) {
      _spin_master = the_thread;

      _blocker->unlock();

      if (do_spin_master_work(terminator)) {
        assert(_offered_termination == _n_threads, "termination condition");
        return true;
      } else {
        _blocker->lock_without_safepoint_check();
      }
    } else {
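      // Another thread is the spin master; sleep until it wakes us
      // or the sleep times out.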
      _blocker->wait(true, WorkStealingSleepMillis);

      if (_offered_termination == _n_threads) {
        _blocker->unlock();
        return true;
      }
    }

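    // Woken up or timed out: leave termination if more work appeared or an
    // external condition requests it; the caller then resumes task processing.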
    size_t tasks = tasks_in_queue_set();
    if (exit_termination(tasks, terminator)) {
      _offered_termination--;
      _blocker->unlock();
      return false;
    }
  }
}

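// Spin-master loop: alternate between hard spins, yields and, eventually,
// timed sleeps while peeking at the task queue set. Returns true once every
// worker has offered termination, and false if this thread should exit
// termination or has handed the spin-master role to another thread.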
bool OWSTTaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
  uint yield_count = 0;
  // Number of hard spin loops done since last yield
  uint hard_spin_count = 0;
  // Number of iterations in the hard spin loop.
  uint hard_spin_limit = WorkStealingHardSpins;

  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
  // If it is greater than 0, then start with a small number
  // of spins and increase the number with each turn at spinning until
  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
  // Then do a yield() call and start spinning afresh.
  if (WorkStealingSpinToYieldRatio > 0) {
    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
    hard_spin_limit = MAX2(hard_spin_limit, 1U);
  }
  // Remember the initial spin limit.
  uint hard_spin_start = hard_spin_limit;

  // Loop waiting for all threads to offer termination or
  // more work.
  while (true) {
    // Look for more work.
    // Periodically sleep() instead of yield() to give threads
    // waiting for a core a chance to run.
    if (yield_count <= WorkStealingYieldsBeforeSleep) {
      // Do a yield or hard spin.  For purposes of deciding whether
      // to sleep, count this as a yield.
      yield_count++;

      // Periodically call yield() instead of spinning.
      // After WorkStealingSpinToYieldRatio spins, do a yield() call
      // and reset the counts and starting limit.
      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
        yield();
        hard_spin_count = 0;
        hard_spin_limit = hard_spin_start;
#ifdef TRACESPINNING
        _total_yields++;
#endif
      } else {
        // Hard spin this time.
        // Increase the hard spinning period but only up to a limit.
        hard_spin_limit = MIN2(2 * hard_spin_limit,
                               (uint) WorkStealingHardSpins);
        for (uint j = 0; j < hard_spin_limit; j++) {
          SpinPause();
        }
        hard_spin_count++;
#ifdef TRACESPINNING
        _total_spins++;
#endif
      }
    } else {
      log_develop_trace(gc, task)("OWSTTaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
                                  p2i(Thread::current()), yield_count);
      yield_count = 0;

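      // Give up the spin-master role while sleeping. If nobody has claimed
      // it by the time we wake up, take it back; otherwise defer to the new
      // spin master and rejoin the ordinary waiters.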
      MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag);
      _spin_master = NULL;
      locker.wait(Mutex::_no_safepoint_check_flag, WorkStealingSleepMillis);
      if (_spin_master == NULL) {
        _spin_master = Thread::current();
      } else {
        return false;
      }
    }

#ifdef TRACESPINNING
    _total_peeks++;
#endif
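    // Peek at the task queue set: if work has reappeared (or an external
    // condition asks us to exit), wake enough sleepers to drain it and
    // resign as spin master.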
    size_t tasks = tasks_in_queue_set();
    if (exit_termination(tasks, terminator)) {
      MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag);

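      // With at least one task per sleeping worker (everyone who offered
      // termination except this spin master), wake them all; otherwise wake
      // one worker per task beyond the one this thread will take itself.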
      if (tasks >= _offered_termination - 1) {
        locker.notify_all();
      } else {
        for (; tasks > 1; tasks--) {
          locker.notify();
        }
      }
      _spin_master = NULL;
      return false;
    } else if (_offered_termination == _n_threads) {
      return true;
    }
  }
}