/*
 * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"

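// Drops all remaining tasks from every queue in the set.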
void ShenandoahObjToScanQueueSet::clear() {
  uint size = GenericTaskQueueSet<ShenandoahObjToScanQueue, mtGC>::size();
  for (uint index = 0; index < size; index ++) {
    ShenandoahObjToScanQueue* q = queue(index);
    assert(q != NULL, "Sanity");
    q->clear();
  }
}

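// Returns true only if every queue in the set is currently empty.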
bool ShenandoahObjToScanQueueSet::is_empty() {
  uint size = GenericTaskQueueSet<ShenandoahObjToScanQueue, mtGC>::size();
  for (uint index = 0; index < size; index ++) {
    ShenandoahObjToScanQueue* q = queue(index);
    assert(q != NULL, "Sanity");
    if (!q->is_empty()) {
      return false;
    }
  }
  return true;
}

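// A worker calls this when it has run out of work. Returns true once all
// workers have offered termination; returns false if more work shows up in
// the queue set (or the terminator requests an exit), in which case the
// caller should resume processing.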
bool ShenandoahTaskTerminator::offer_termination(ShenandoahTerminatorTerminator* terminator) {
  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  assert(_blocker != NULL, "Invariant");

  // single worker, done
  if (_n_threads == 1) {
    return true;
  }

  _blocker->lock_without_safepoint_check();
  // all arrived, done
  if (++ _offered_termination == _n_threads) {
    _blocker->notify_all();
    _blocker->unlock();
    return true;
  }

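  // One worker elects itself spin master and actively polls the queues in
  // do_spin_master_work(); the rest block on _blocker until they are either
  // notified of termination or woken to resume work.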
  Thread* the_thread = Thread::current();
  while (true) {
    if (_spin_master == NULL) {
      _spin_master = the_thread;

      _blocker->unlock();

      if (do_spin_master_work(terminator)) {
        assert(_offered_termination == _n_threads, "termination condition");
        return true;
      } else {
        _blocker->lock_without_safepoint_check();
      }
    } else {
      _blocker->wait(true, WorkStealingSleepMillis);

      if (_offered_termination == _n_threads) {
        _blocker->unlock();
        return true;
      }
    }

    if (peek_in_queue_set() || (terminator != NULL && terminator->should_exit_termination())) {
      _offered_termination --;
      _blocker->unlock();
      return false;
    }
  }
}

#if TASKQUEUE_STATS
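// Prints the header rows for the per-thread task queue statistics table.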
void ShenandoahObjToScanQueueSet::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

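// Prints per-queue statistics and a totals row; emitted only when
// trace-level gc+task+stats (develop) logging is enabled.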
void ShenandoahObjToScanQueueSet::print_taskqueue_stats() const {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  LogStream ls(log.trace());
  outputStream* st = &ls;
  print_taskqueue_stats_hdr(st);

  ShenandoahObjToScanQueueSet* queues = const_cast<ShenandoahObjToScanQueueSet*>(this);
  TaskQueueStats totals;
  const uint n = size();
  for (uint i = 0; i < n; ++i) {
    st->print(UINT32_FORMAT_W(3), i);
    queues->queue(i)->stats.print(st);
    st->cr();
    totals += queues->queue(i)->stats;
  }
  st->print("tot "); totals.print(st); st->cr();
  DEBUG_ONLY(totals.verify());
}

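// Resets the statistics counters on every queue in the set.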
void ShenandoahObjToScanQueueSet::reset_taskqueue_stats() {
  const uint n = size();
  for (uint i = 0; i < n; ++i) {
    queue(i)->stats.reset();
  }
}
#endif // TASKQUEUE_STATS

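// Spin master loop: alternate hard spinning, yielding and sleeping while
// watching the queue set. Returns true once all workers have offered
// termination; returns false when work reappears, an exit is requested, or
// another thread has taken over as spin master.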
bool ShenandoahTaskTerminator::do_spin_master_work(ShenandoahTerminatorTerminator* terminator) {
  uint yield_count = 0;
  // Number of hard spin loops done since last yield
  uint hard_spin_count = 0;
  // Number of iterations in the hard spin loop.
  uint hard_spin_limit = WorkStealingHardSpins;

  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
  // If it is greater than 0, then start with a small number
  // of spins and increase the number with each turn at spinning until
  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
  // Then do a yield() call and start spinning afresh.
  if (WorkStealingSpinToYieldRatio > 0) {
    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
    hard_spin_limit = MAX2(hard_spin_limit, 1U);
  }
  // Remember the initial spin limit.
  uint hard_spin_start = hard_spin_limit;

  // Loop waiting for all threads to offer termination or
  // more work.
  while (true) {
    // Look for more work.
    // Periodically sleep() instead of yield() to give threads
    // waiting on the cores a chance to grab this code.
    if (yield_count <= WorkStealingYieldsBeforeSleep) {
      // Do a yield or hardspin.  For purposes of deciding whether
      // to sleep, count this as a yield.
      yield_count++;

      // Periodically call yield() instead of spinning.
      // After WorkStealingSpinToYieldRatio spins, do a yield() call
      // and reset the counts and starting limit.
      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
        yield();
        hard_spin_count = 0;
        hard_spin_limit = hard_spin_start;
#ifdef TRACESPINNING
        _total_yields++;
#endif
      } else {
        // Hard spin this time
        // Increase the hard spinning period but only up to a limit.
        hard_spin_limit = MIN2(2*hard_spin_limit,
                               (uint) WorkStealingHardSpins);
        for (uint j = 0; j < hard_spin_limit; j++) {
          SpinPause();
        }
        hard_spin_count++;
#ifdef TRACESPINNING
        _total_spins++;
#endif
      }
    } else {
      log_develop_trace(gc, task)("ShenandoahTaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
                                  p2i(Thread::current()), yield_count);
      yield_count = 0;

      MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag);   // no safepoint check
      _spin_master = NULL;
      locker.wait(Mutex::_no_safepoint_check_flag, WorkStealingSleepMillis);
      if (_spin_master == NULL) {
        _spin_master = Thread::current();
      } else {
        return false;
      }
    }

#ifdef TRACESPINNING
    _total_peeks++;
#endif
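    // Check whether new work has arrived or the terminator requests an exit.
    // If so, wake up sleeping workers, give up the spin master role and abort
    // termination; otherwise terminate once all workers have checked in.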
    size_t tasks = tasks_in_queue_set();
    if (tasks > 0 || (terminator != NULL && terminator->should_exit_termination())) {
      MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag);   // no safepoint check

      if (tasks >= _offered_termination - 1) {
        locker.notify_all();
      } else {
        for (; tasks > 1; tasks --) {
          locker.notify();
        }
      }
      _spin_master = NULL;
      return false;
    } else if (_offered_termination == _n_threads) {
      return true;
    }
  }
}