< prev index next >

src/hotspot/share/gc/shared/taskTerminator.cpp

Print this page
rev 59861 : [mq]: 8245721-refactor-taskterminator
   1 /*
   2  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
   3  * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 

  28 #include "gc/shared/taskTerminator.hpp"
  29 #include "gc/shared/taskqueue.hpp"
  30 #include "logging/log.hpp"









  31 
// Construct a terminator for n_threads workers draining queue_set.
// The monitor is created with _safepoint_check_never: every lock/wait
// in this file uses the no-safepoint-check variants.
  32 TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  33   _n_threads(n_threads),
  34   _queue_set(queue_set),
  35   _offered_termination(0),
  36   _spin_master(NULL) {
  37 
  38   _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
  39 }
  40 
// Destructor: verify quiescence (either never used, or every thread
// offered termination and the queues drained) before freeing the monitor.
  41 TaskTerminator::~TaskTerminator() {
  42   if (_offered_termination != 0) {
  43     assert(_offered_termination == _n_threads, "Must be terminated or aborted");
  44     assert_queue_set_empty();
  45   }
  46 
  47   assert(_spin_master == NULL, "Should have been reset");
  48   assert(_blocker != NULL, "Can not be NULL");
  49   delete _blocker;
  50 }
  51 
// Debug-only check that every queue in the set is empty; compiled out
// in product builds (the header presumably supplies a no-op then —
// confirm against taskTerminator.hpp).
  52 #ifdef ASSERT
  53 void TaskTerminator::assert_queue_set_empty() const {
  54   _queue_set->assert_empty();
  55 }
  56 #endif
  57 
// Yield the CPU once; sanity-checks the termination counter invariant first.
  58 void TaskTerminator::yield() {
  59   assert(_offered_termination <= _n_threads, "Invariant");
  60   os::naked_yield();
  61 }
  62 
// Reset the terminator so the same instance can be used for another
// round of work.  Only legal once the previous round fully terminated
// (_offered_termination == _n_threads) or the terminator was never used.
  63 void TaskTerminator::reset_for_reuse() {
  64   if (_offered_termination != 0) {
  65     assert(_offered_termination == _n_threads,
  66            "Terminator may still be in use");

  67     _offered_termination = 0;
  68   }
  69 }
  70 
// Reset for reuse with a (possibly different) number of worker threads.
  71 void TaskTerminator::reset_for_reuse(uint n_threads) {
  72   reset_for_reuse();
  73   _n_threads = n_threads;
  74 }
  75 
  76 bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
  77   return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
  78 }
  79 
// Snapshot of the total number of tasks currently in the queue set.
  80 size_t TaskTerminator::tasks_in_queue_set() const {
  81   return _queue_set->tasks();
  82 }
  83 














































// Offer termination for the calling worker.  Returns true when all
// _n_threads workers have offered termination (work is done), false when
// the caller should exit termination and resume processing tasks.  With
// multiple workers, one waiting thread becomes the "spin master" (see
// do_spin_master_work()); the others sleep on _blocker in bounded
// intervals.
  84 bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  85   assert(_n_threads > 0, "Initialization is incorrect");
  86   assert(_offered_termination < _n_threads, "Invariant");
  87   assert(_blocker != NULL, "Invariant");
  88 
  89   // Single worker, done
  90   if (_n_threads == 1) {
  91     _offered_termination = 1;
  92     assert_queue_set_empty();
  93     return true;
  94   }
  95 
  96   _blocker->lock_without_safepoint_check();



  97   _offered_termination++;
  98   // All arrived, done
  99   if (_offered_termination == _n_threads) {
 100     _blocker->notify_all();
 101     _blocker->unlock();
 102     assert_queue_set_empty();
 103     return true;
 104   }
 105 
 106   Thread* the_thread = Thread::current();
 107   while (true) {
 108     if (_spin_master == NULL) {
 109       _spin_master = the_thread;
 110 
      // The spin master spins/sleeps without holding the lock.
 111       _blocker->unlock();






 112 
 113       if (do_spin_master_work(terminator)) {
 114         assert(_offered_termination == _n_threads, "termination condition");
 115         assert_queue_set_empty();
 116         return true;
 117       } else {
 118         _blocker->lock_without_safepoint_check();
 119         // There is possibility that termination is reached between dropping the lock
 120         // before returning from do_spin_master_work() and acquiring lock above.
 121         if (_offered_termination == _n_threads) {
 122           _blocker->unlock();
 123           assert_queue_set_empty();
 124           return true;
 125         }
 126       }
 127     } else {
      // Not spin master: sleep in bounded intervals and re-check for
      // termination after each wakeup/timeout (wait returns with the
      // lock re-acquired).
 128       _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);
 129 


 130       if (_offered_termination == _n_threads) {
 131         _blocker->unlock();
 132         assert_queue_set_empty();
 133         return true;
 134       }
 135     }
 136 
      // Both branches reach here holding the lock (asserted below).
      // Leave termination if tasks showed up or the external terminator
      // says so.
 137     size_t tasks = tasks_in_queue_set();
 138     if (exit_termination(tasks, terminator)) {
 139       assert_lock_strong(_blocker);
 140       _offered_termination--;
 141       _blocker->unlock();
 142       return false;
 143     }
 144   }
 145 }
 146 
// Spin-master loop: alternate hard spinning (SpinPause), yields, and
// timed sleeps on _blocker, re-checking after each round whether all
// threads have offered termination or whether there is reason to exit
// termination.  Returns true on full termination, false when the caller
// should exit termination; in both cases _spin_master is cleared (under
// the lock) before returning.
 147 bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
 148   uint yield_count = 0;
 149   // Number of hard spin loops done since last yield
 150   uint hard_spin_count = 0;
 151   // Number of iterations in the hard spin loop.
 152   uint hard_spin_limit = WorkStealingHardSpins;
 153 
 154   // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
 155   // If it is greater than 0, then start with a small number
 156   // of spins and increase number with each turn at spinning until
 157   // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
 158   // Then do a yield() call and start spinning afresh.
 159   if (WorkStealingSpinToYieldRatio > 0) {
 160     hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
 161     hard_spin_limit = MAX2(hard_spin_limit, 1U);
 162   }
 163   // Remember the initial spin limit.
 164   uint hard_spin_start = hard_spin_limit;
 165 
 166   // Loop waiting for all threads to offer termination or
 167   // more work.
 168   while (true) {
 169     // Look for more work.
 170     // Periodically sleep() instead of yield() to give threads
 171     // waiting on the cores the chance to grab this code
 172     if (yield_count <= WorkStealingYieldsBeforeSleep) {
 173       // Do a yield or hardspin.  For purposes of deciding whether
 174       // to sleep, count this as a yield.
 175       yield_count++;
 176 
 177       // Periodically call yield() instead spinning
 178       // After WorkStealingSpinToYieldRatio spins, do a yield() call
 179       // and reset the counts and starting limit.
 180       if (hard_spin_count > WorkStealingSpinToYieldRatio) {
 181         yield();
 182         hard_spin_count = 0;
 183         hard_spin_limit = hard_spin_start;
 184       } else {
 185         // Hard spin this time
 186         // Increase the hard spinning period but only up to a limit.
 187         hard_spin_limit = MIN2(2*hard_spin_limit,
 188                                (uint) WorkStealingHardSpins);
 189         for (uint j = 0; j < hard_spin_limit; j++) {
 190           SpinPause();
 191         }
 192         hard_spin_count++;
 193       }
 194     } else {
 195       log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
 196                                   p2i(Thread::current()), yield_count);
 197       yield_count = 0;
 198 
 199       MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
      // Drop the spin-master role while sleeping; if nobody claimed it
      // during the sleep, take it back, otherwise return and let this
      // thread sleep in offer_termination() instead.
 200       _spin_master = NULL;
 201       locker.wait(WorkStealingSleepMillis);
 202       if (_spin_master == NULL) {
 203         _spin_master = Thread::current();
 204       } else {
 205         return false;
 206       }
 207     }

 208 
      // Dirty read of the exit condition outside the lock; re-validated
      // against _offered_termination under the lock below.
 209     size_t tasks = tasks_in_queue_set();
 210     bool exit = exit_termination(tasks, terminator);
 211     {
 212       MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
 213       // Termination condition reached
 214       if (_offered_termination == _n_threads) {
 215         _spin_master = NULL;

 216         return true;
 217       } else if (exit) {
        // Wake sleepers so they can pick up the available tasks: all of
        // them when there appear to be enough tasks for every waiting
        // thread, otherwise roughly one thread per task.
 218         if (tasks >= _offered_termination - 1) {
 219           locker.notify_all();
 220         } else {
 221           for (; tasks > 1; tasks--) {
 222             locker.notify();
 223           }
 224         }
 225         _spin_master = NULL;
 226         return false;
 227       }
 228     }

 229   }
 230 }
   1 /*
   2  * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
   3  * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 
  28 #include "gc/shared/gc_globals.hpp"
  29 #include "gc/shared/taskTerminator.hpp"
  30 #include "gc/shared/taskqueue.hpp"
  31 #include "logging/log.hpp"
  32 #include "runtime/mutex.hpp"
  33 #include "runtime/mutexLocker.hpp"
  34 #include "runtime/thread.hpp"
  35 
// Reset the per-round spin bookkeeping so a fresh spin/yield cycle can
// start (used after waking from a sleep in offer_termination()).
  36 void TaskTerminator::SpinContext::reset() {
  37   yield_count = 0;
  38   hard_spin_count = 0;
  39   hard_spin_limit = WorkStealingHardSpins;
  40 }
  41 
// Construct a terminator for n_threads workers draining queue_set.
// The monitor is created with _safepoint_check_never: every lock/wait
// in this file uses the no-safepoint-check variants.
  42 TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  43   _n_threads(n_threads),
  44   _queue_set(queue_set),
  45   _offered_termination(0),
  46   _spin_master(NULL) {
  47 
  48   _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
  49 }
  50 
// Destructor: verify quiescence (either never used, or every thread
// offered termination and the queues drained) before freeing the monitor.
  51 TaskTerminator::~TaskTerminator() {
  52   if (_offered_termination != 0) {
  53     assert(_offered_termination == _n_threads, "Must be terminated or aborted");
  54     assert_queue_set_empty();
  55   }
  56 
  57   assert(_spin_master == NULL, "Should have been reset");
  58   assert(_blocker != NULL, "Can not be NULL");
  59   delete _blocker;
  60 }
  61 
// Debug-only check that every queue in the set is empty; compiled out
// in product builds (the header presumably supplies a no-op then —
// confirm against taskTerminator.hpp).
  62 #ifdef ASSERT
  63 void TaskTerminator::assert_queue_set_empty() const {
  64   _queue_set->assert_empty();
  65 }
  66 #endif
  67 
// Yield the CPU once; sanity-checks the termination counter invariant first.
  68 void TaskTerminator::yield() {
  69   assert(_offered_termination <= _n_threads, "Invariant");
  70   os::naked_yield();
  71 }
  72 
// Reset the terminator so the same instance can be used for another
// round of work.  Only legal once the previous round fully terminated
// (_offered_termination == _n_threads, no leftover spin master) or the
// terminator was never used.
  73 void TaskTerminator::reset_for_reuse() {
  74   if (_offered_termination != 0) {
  75     assert(_offered_termination == _n_threads,
  76            "Only %u of %u threads offered termination", _offered_termination, _n_threads);
  77     assert(_spin_master == NULL, "Leftover spin master " PTR_FORMAT, p2i(_spin_master));
  78     _offered_termination = 0;
  79   }
  80 }
  81 
// Reset for reuse with a (possibly different) number of worker threads.
  82 void TaskTerminator::reset_for_reuse(uint n_threads) {
  83   reset_for_reuse();
  84   _n_threads = n_threads;
  85 }
  86 
  87 bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
  88   return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
  89 }
  90 
// Snapshot of the total number of tasks currently in the queue set.
  91 size_t TaskTerminator::tasks_in_queue_set() const {
  92   return _queue_set->tasks();
  93 }
  94 
// Housekeeping done while still holding _blocker, just before a thread
// returns from offer_termination(): give up the spin-master role if this
// thread holds it, and wake sleeping threads when there are tasks for
// them to pick up.  Call sites that return on full termination pass only
// the thread ('tasks' presumably defaults to 0 in the header — confirm
// against taskTerminator.hpp), which then wakes everybody via notify_all().
  95 void TaskTerminator::prepare_for_return(Thread* this_thread, size_t tasks) {
  96   assert(_blocker->is_locked(), "must be");
  97   assert(_blocker->owned_by_self(), "must be");
  98   assert(_offered_termination >= 1, "must be");
  99 
 100   if (_spin_master == this_thread) {
 101     _spin_master = NULL;
 102   }
 103 
  // Wake all waiters when there appear to be enough tasks for each of
  // them; otherwise wake roughly one thread per task.
 104   if (tasks >= _offered_termination - 1) {
 105     _blocker->notify_all();
 106   } else {
 107     for (; tasks > 1; tasks--) {
 108       _blocker->notify();
 109     }
 110   }
 111 }
 112 
 113 bool TaskTerminator::do_spin_iteration(SpinContext& spin_context) {
 114   assert(!_blocker->owned_by_self(), "should not be owned by self");
 115 
 116   if (spin_context.yield_count <= WorkStealingYieldsBeforeSleep) {
 117     spin_context.yield_count++;
 118     // Periodically call yield() instead spinning
 119     // After WorkStealingSpinToYieldRatio spins, do a yield() call
 120     // and reset the counts and starting limit.
 121     if (spin_context.hard_spin_count > WorkStealingSpinToYieldRatio) {
 122       os::naked_yield();
 123       spin_context.hard_spin_count = 0;
 124       spin_context.hard_spin_limit = WorkStealingHardSpins;
 125     } else {
 126       // Hard spin this time
 127       // Increase the hard spinning period but only up to a limit.
 128       spin_context.hard_spin_limit = MIN2(2 * spin_context.hard_spin_limit,
 129                                           (uint) WorkStealingHardSpins);
 130       for (uint j = 0; j < spin_context.hard_spin_limit; j++) {
 131         SpinPause();
 132       }
 133       spin_context.hard_spin_count++;
 134     }
 135     return false;
 136   } else {
 137     return true;
 138   }
 139 }
 140 
// Offer termination for the calling worker.  Returns true when all
// _n_threads workers have offered termination (work is done), false when
// the caller should exit termination and resume processing tasks.
// _blocker is held for the whole body except inside the MutexUnlocker
// window where the spin master spins and dirty-reads the exit condition.
 141 bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
 142   assert(_n_threads > 0, "Initialization is incorrect");
 143   assert(_offered_termination < _n_threads, "Invariant");
 144   assert(_blocker != NULL, "Invariant");
 145 
 146   // Single worker, done
 147   if (_n_threads == 1) {
 148     _offered_termination = 1;
 149     assert_queue_set_empty();
 150     return true;
 151   }
 152 
 153   Thread* the_thread = Thread::current();
 154   SpinContext spin_context;
 155 
 156   MutexLocker x(_blocker, Mutex::_no_safepoint_check_flag);
 157   _offered_termination++;
 158 
 159   if (_offered_termination == _n_threads) {
 160     prepare_for_return(the_thread);

 161     assert_queue_set_empty();
 162     return true;
 163   }
 164 
 165   for (;;) {

    // No spin master: claim the role and spin until either an exit
    // condition fires or the spin budget is used up.
 166     if (_spin_master == NULL) {
 167       _spin_master = the_thread;
 168 
 169       bool giveup_spin;
 170       do {
 171         size_t tasks;
 172         bool should_exit_termination;
 173         {
 174           MutexUnlocker y(_blocker, Mutex::_no_safepoint_check_flag);
 175           giveup_spin = do_spin_iteration(spin_context);
 176 
 177           // Dirty read of exit condition.
 178           tasks = tasks_in_queue_set();
 179           should_exit_termination = exit_termination(tasks, terminator);










 180         }


 181 
 182         // Immediately check exit conditions after re-acquiring the lock using the
 183         // information gathered just recently.
 184         if (_offered_termination == _n_threads) {
 185           prepare_for_return(the_thread);
 186           assert_queue_set_empty();
 187           return true;
 188         } else if (should_exit_termination) {
 189           prepare_for_return(the_thread, tasks);




 190           _offered_termination--;

 191           return false;
 192         }
 193       } while (!giveup_spin);
 194       // Give up spin master before sleeping.






















































 195       _spin_master = NULL;






 196     }
    // Sleep in bounded intervals; wait returns with the lock re-acquired.
 197     _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);
 198 
 199     // Immediately check exit conditions after re-acquiring the lock.




 200     if (_offered_termination == _n_threads) {
 201       prepare_for_return(the_thread);
 202       assert_queue_set_empty();
 203       return true;



 204     } else {
 205       size_t tasks = tasks_in_queue_set();
 206       if (exit_termination(tasks, terminator)) {
 207         prepare_for_return(the_thread, tasks);
 208         _offered_termination--;

 209         return false;
 210       }
 211     }
    // Start the next round with fresh spin bookkeeping.
 212     spin_context.reset();
 213   }
 214 }
< prev index next >