1 /*
   2  * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/yieldingWorkgroup.hpp"
  27 #include "gc/shared/gcId.hpp"
  28 #include "runtime/mutexLocker.inline.hpp"
  29 #include "utilities/macros.hpp"
  30 
// A gang worker for a yielding-flexible gang; adds no state of its own,
// simply delegates construction to AbstractGangWorker.
YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id)
    : AbstractGangWorker(gang, id) {}
  33 
// Construct a gang of "workers" threads named "name". All per-task
// bookkeeping counters start at zero and no task is bound yet.
// NOTE(review): the trailing 'false' is passed through to AbstractWorkGang —
// presumably the are_ConcurrentGC_threads flag; confirm against workgroup.hpp.
YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
    const char* name, uint workers, bool are_GC_task_threads) :
         AbstractWorkGang(name, workers, are_GC_task_threads, false),
         _yielded_workers(0),
         _started_workers(0),
         _finished_workers(0),
         _sequence_number(0),
         _task(NULL) {

  // Other initialization.
  // This monitor is shared by the overseer (start/continue/wait) and all
  // gang workers; every wait on it in this file skips safepoint checks.
  _monitor = new Monitor(/* priority */       Mutex::leaf,
                         /* name */           "WorkGroup monitor",
                         /* allow_vm_block */ are_GC_task_threads,
                                              Monitor::_safepoint_check_never);

  assert(monitor() != NULL, "Failed to allocate monitor");
}
  51 
// Factory override: gang slot "which" is populated with a
// YieldingFlexibleGangWorker rather than a plain AbstractGangWorker.
AbstractGangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
  return new YieldingFlexibleGangWorker(this, which);
}
  55 
// Snapshot the gang's current (task, sequence_number) pair into the
// worker-local |data|. The sole caller in this file (GangWorker::loop)
// holds the gang monitor while polling, so the pair is read consistently.
void YieldingFlexibleWorkGang::internal_worker_poll(YieldingWorkData* data) const {
  assert(data != NULL, "worker data is null");
  data->set_task(task());
  data->set_sequence_number(sequence_number());
}
  61 
  62 void YieldingFlexibleWorkGang::internal_note_start() {
  63   assert(monitor()->owned_by_self(), "note_finish is an internal method");
  64   _started_workers += 1;
  65 }
  66 
  67 void YieldingFlexibleWorkGang::internal_note_finish() {
  68   assert(monitor()->owned_by_self(), "note_finish is an internal method");
  69   _finished_workers += 1;
  70 }
  71 
  72 // Run a task; returns when the task is done, or the workers yield,
  73 // or the task is aborted.
  74 // A task that has been yielded can be continued via this interface
  75 // by using the same task repeatedly as the argument to the call.
  76 // It is expected that the YieldingFlexibleGangTask carries the appropriate
  77 // continuation information used by workers to continue the task
  78 // from its last yield point. Thus, a completed task will return
  79 // immediately with no actual work having been done by the workers.
  80 /////////////////////
// Implementation notes: remove before checking in XXX
  82 /*
  83 Each gang is working on a task at a certain time.
  84 Some subset of workers may have yielded and some may
  85 have finished their quota of work. Until this task has
  86 been completed, the workers are bound to that task.
Once the task has been completed, the gang unbinds
  88 itself from the task.
  89 
The yielding work gang thus exports two invocation
  91 interfaces: run_task() and continue_task(). The
  92 first is used to initiate a new task and bind it
  93 to the workers; the second is used to continue an
  94 already bound task that has yielded. Upon completion
  95 the binding is released and a new binding may be
  96 created.
  97 
  98 The shape of a yielding work gang is as follows:
  99 
 100 Overseer invokes run_task(*task).
 101    Lock gang monitor
 102    Check that there is no existing binding for the gang
 103    If so, abort with an error
 104    Else, create a new binding of this gang to the given task
 105    Set number of active workers (as asked)
 106    Notify workers that work is ready to be done
 107      [the requisite # workers would then start up
 108       and do the task]
 109    Wait on the monitor until either
 110      all work is completed or the task has yielded
 111      -- this is normally done through
 112         yielded + completed == active
        [completed workers are reset to idle state by overseer?]
 114    return appropriate status to caller
 115 
 116 Overseer invokes continue_task(*task),
 117    Lock gang monitor
 118    Check that task is the same as current binding
 119    If not, abort with an error
 120    Else, set the number of active workers as requested?
 121    Notify workers that they can continue from yield points
 122     New workers can also start up as required
 123       while satisfying the constraint that
 124          active + yielded does not exceed required number
 125    Wait (as above).
 126 
 127 NOTE: In the above, for simplicity in a first iteration
 128   our gangs will be of fixed population and will not
 129   therefore be flexible work gangs, just yielding work
 130   gangs. Once this works well, we will in a second
  iteration/refinement introduce flexibility into
 132   the work gang.
 133 
 134 NOTE: we can always create a new gang per each iteration
 135   in order to get the flexibility, but we will for now
  desist from that simplified route.
 137 
 138  */
 139 /////////////////////
// Bind |new_task| to this (currently unbound) gang, activate the requested
// number of workers, and block in wait_for_gang() until the task completes,
// yields, or aborts (see the block comment above for the overall protocol).
void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
  MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() == NULL, "Gang currently tied to a task");
  assert(new_task != NULL, "Null task");
  // Bind task to gang
  _task = new_task;
  new_task->set_gang(this);  // Establish 2-way binding to support yielding
  _sequence_number++;        // Workers compare this against their last-seen
                             // value to detect new work (see loop()).

  uint requested_size = new_task->requested_size();
  if (requested_size != 0) {
    // Honor the task's request, capped at the gang's population.
    _active_workers = MIN2(requested_size, total_workers());
  } else {
    // No explicit request: keep the current active-worker count.
    _active_workers = active_workers();
  }
  new_task->set_actual_size(_active_workers);
  new_task->set_for_termination(_active_workers);

  // A fresh task must start from clean per-task counters.
  assert(_started_workers == 0, "Tabula rasa non");
  assert(_finished_workers == 0, "Tabula rasa non");
  assert(_yielded_workers == 0, "Tabula rasa non");
  yielding_task()->set_status(ACTIVE);

  // Wake up all the workers, the first few will get to work,
  // and the rest will go back to sleep
  monitor()->notify_all();
  wait_for_gang();
}
 168 
// Block on the gang monitor (already held by the caller) until the bound
// task reaches COMPLETED, ABORTED, or YIELDED. On COMPLETED/ABORTED the
// gang<->task binding is released via reset(); on YIELDED the binding is
// kept so continue_task() can resume from the yield points.
void YieldingFlexibleWorkGang::wait_for_gang() {

  assert(monitor()->owned_by_self(), "Data race");
  // Wait for task to complete or yield
  for (Status status = yielding_task()->status();
       status != COMPLETED && status != YIELDED && status != ABORTED;
       status = yielding_task()->status()) {
    assert(started_workers() <= active_workers(), "invariant");
    assert(finished_workers() <= active_workers(), "invariant");
    assert(yielded_workers() <= active_workers(), "invariant");
    monitor()->wait_without_safepoint_check();
  }
  switch (yielding_task()->status()) {
    case COMPLETED:
    case ABORTED: {
      // Terminal: every active worker finished and none remain yielded.
      assert(finished_workers() == active_workers(), "Inconsistent status");
      assert(yielded_workers() == 0, "Invariant");
      reset();   // for next task; gang<->task binding released
      break;
    }
    case YIELDED: {
      // Paused: the non-finished remainder of the gang is parked at
      // yield points, awaiting continue_task().
      assert(yielded_workers() > 0, "Invariant");
      assert(yielded_workers() + finished_workers() == active_workers(),
             "Inconsistent counts");
      break;
    }
    case ACTIVE:
    case INACTIVE:
    case COMPLETING:
    case YIELDING:
    case ABORTING:
    default:
      // Transient states cannot be observed once the wait loop exits.
      ShouldNotReachHere();
  }
}
 204 
// Resume a previously yielded task. |gang_task| must be the task currently
// bound to this gang, in YIELDED state, with all of its started workers
// parked at yield points. Blocks again in wait_for_gang().
void YieldingFlexibleWorkGang::continue_task(
  YieldingFlexibleGangTask* gang_task) {

  MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() != NULL && task() == gang_task, "Incorrect usage");
  assert(_started_workers == _active_workers, "Precondition");
  assert(_yielded_workers > 0 && yielding_task()->status() == YIELDED,
         "Else why are we calling continue_task()");
  // Restart the yielded gang workers
  yielding_task()->set_status(ACTIVE);
  monitor()->notify_all();
  wait_for_gang();
}
 218 
// Release the gang<->task binding and clear per-task counters so a new
// task can be started. _yielded_workers is deliberately not touched here:
// the only caller (wait_for_gang) asserts it is already zero on the
// COMPLETED/ABORTED paths that reach this.
void YieldingFlexibleWorkGang::reset() {
  _started_workers  = 0;
  _finished_workers = 0;
  yielding_task()->set_gang(NULL);
  _task = NULL;    // unbind gang from task
}
 225 
// Called (via YieldingFlexibleGangTask::yield()) by a worker that has
// reached a yield point in its work() body. The worker parks on the gang
// monitor until the task is resumed (ACTIVE), aborted (ABORTING), or moved
// to COMPLETING by another worker. If this is the last non-finished
// worker, it flips the task to YIELDED and wakes the waiting overseer.
void YieldingFlexibleWorkGang::yield() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  if (yielding_task()->status() == ABORTING) {
    // Do not yield; we need to abort as soon as possible
    // XXX NOTE: This can cause a performance pathology in the
    // current implementation in Mustang, as of today, and
    // pre-Mustang in that as soon as an overflow occurs,
    // yields will not be honoured. The right way to proceed
    // of course is to fix bug # TBF, so that abort's cause
    // us to return at each potential yield point.
    return;
  }
  if (++_yielded_workers + finished_workers() == active_workers()) {
    // Every active worker has now either finished or yielded: the task
    // as a whole is YIELDED; notify the overseer in wait_for_gang().
    yielding_task()->set_status(YIELDED);
    monitor()->notify_all();
  } else {
    yielding_task()->set_status(YIELDING);
  }

  while (true) {
    switch (yielding_task()->status()) {
      case YIELDING:
      case YIELDED: {
        // Still paused; keep waiting on the gang monitor.
        monitor()->wait_without_safepoint_check();
        break;  // from switch
      }
      case ACTIVE:
      case ABORTING:
      case COMPLETING: {
        // Resumed (or told to wind down): stop counting as yielded and
        // return into the work() body at the yield point.
        assert(_yielded_workers > 0, "Else why am i here?");
        _yielded_workers--;
        return;
      }
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      default: {
        // A yielded worker must be woken before the task can terminate.
        ShouldNotReachHere();
      }
    }
  }
  // Only return is from inside switch statement above
  ShouldNotReachHere();
}
 272 
// Move the current task into ABORTING state so workers return from their
// work()/yield points as soon as possible. If some workers were mid-yield
// (YIELDING), wake them so they can unwind promptly.
void YieldingFlexibleWorkGang::abort() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  #ifndef PRODUCT
    // Debug-only sanity check: abort is legal only while the task is
    // still in flight (not yet terminal, not fully yielded).
    switch (yielding_task()->status()) {
      // allowed states
      case ACTIVE:
      case ABORTING:
      case COMPLETING:
      case YIELDING:
        break;
      // not allowed states
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      case YIELDED:
      default:
        ShouldNotReachHere();
    }
  #endif // !PRODUCT
  Status prev_status = yielding_task()->status();
  yielding_task()->set_status(ABORTING);
  if (prev_status == YIELDING) {
    assert(yielded_workers() > 0, "Inconsistency");
    // At least one thread has yielded, wake it up
    // so it can go back to waiting stations ASAP.
    monitor()->notify_all();
  }
}
 303 
 304 ///////////////////////////////
 305 // YieldingFlexibleGangTask
 306 ///////////////////////////////
// Forward a yield request from the task's work() body to the bound gang,
// which does the actual parking (see YieldingFlexibleWorkGang::yield).
void YieldingFlexibleGangTask::yield() {
  assert(gang() != NULL, "No gang to signal");
  gang()->yield();
}
 311 
// Forward an abort request to the bound gang, which flips the task to
// ABORTING (see YieldingFlexibleWorkGang::abort).
void YieldingFlexibleGangTask::abort() {
  assert(gang() != NULL, "No gang to signal");
  gang()->abort();
}
 316 
 317 ///////////////////////////////
 318 // YieldingFlexibleGangWorker
 319 ///////////////////////////////
// Main loop of a yielding gang worker; never returns. The worker sleeps on
// the gang monitor; when start_task() publishes a new (task, sequence
// number) pair, up to active_workers() of the woken workers claim an id,
// run the task body with the monitor released, then reacquire the monitor
// and advance the task's status machine based on whether they were the
// last worker to finish.
void YieldingFlexibleGangWorker::loop() {
  int previous_sequence_number = 0;
  Monitor* gang_monitor = yf_gang()->monitor();
  MutexLocker ml(gang_monitor, Mutex::_no_safepoint_check_flag);
  YieldingWorkData data;
  int id;
  while (true) {
    // Check if there is work to do.
    yf_gang()->internal_worker_poll(&data);
    if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
      // There is work to be done.
      // First check if we need to become active or if there
      // are already the requisite number of workers
      if (yf_gang()->started_workers() == yf_gang()->active_workers()) {
        // There are already enough workers, we do not need to
        // to run; fall through and wait on monitor.
      } else {
        // We need to pitch in and do the work.
        assert(yf_gang()->started_workers() < yf_gang()->active_workers(),
               "Unexpected state");
        // Claim the next worker id; valid because we hold the monitor.
        id = yf_gang()->started_workers();
        yf_gang()->internal_note_start();
        // Now, release the gang mutex and do the work.
        {
          MutexUnlocker mul(gang_monitor, Mutex::_no_safepoint_check_flag);
          GCIdMark gc_id_mark(data.task()->gc_id());
          data.task()->work(id);   // This might include yielding
        }
        // Reacquire monitor and note completion of this worker
        yf_gang()->internal_note_finish();
        // Update status of task based on whether all workers have
        // finished or some have yielded
        assert(data.task() == yf_gang()->task(), "Confused task binding");
        if (yf_gang()->finished_workers() == yf_gang()->active_workers()) {
          // Last worker out: move the task to its terminal state and wake
          // the overseer blocked in wait_for_gang().
          switch (data.yf_task()->status()) {
            case ABORTING: {
              data.yf_task()->set_status(ABORTED);
              break;
            }
            case ACTIVE:
            case COMPLETING: {
              data.yf_task()->set_status(COMPLETED);
              break;
            }
            default:
              ShouldNotReachHere();
          }
          gang_monitor->notify_all();  // Notify overseer
        } else { // at least one worker is still working or yielded
          assert(yf_gang()->finished_workers() < yf_gang()->active_workers(),
                 "Counts inconsistent");
          switch (data.yf_task()->status()) {
            case ACTIVE: {
              // first, but not only thread to complete
              data.yf_task()->set_status(COMPLETING);
              break;
            }
            case YIELDING: {
              // If the remaining non-finished workers have all yielded,
              // the task as a whole is now YIELDED; tell the overseer.
              if (yf_gang()->finished_workers() + yf_gang()->yielded_workers()
                  == yf_gang()->active_workers()) {
                data.yf_task()->set_status(YIELDED);
                gang_monitor->notify_all();  // notify overseer
              }
              break;
            }
            case ABORTING:
            case COMPLETING: {
              break; // nothing to do
            }
            default: // everything else: INACTIVE, YIELDED, ABORTED, COMPLETED
              ShouldNotReachHere();
          }
        }
      }
    }
    // Remember the sequence number
    previous_sequence_number = data.sequence_number();
    // Wait for more work
    gang_monitor->wait_without_safepoint_check();
  }
}