/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/yieldingWorkgroup.hpp"
#include "gc/shared/gcId.hpp"
#include "utilities/macros.hpp"

YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id)
    : AbstractGangWorker(gang, id) {}

YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
    const char* name, uint workers, bool are_GC_task_threads) :
         AbstractWorkGang(name, workers, are_GC_task_threads, false),
         _yielded_workers(0),
         _started_workers(0),
         _finished_workers(0),
         _sequence_number(0),
         _task(NULL) {

  // Other initialization.
  _monitor = new Monitor(/* priority */       Mutex::leaf,
                         /* name */           "WorkGroup monitor",
                         /* allow_vm_block */ are_GC_task_threads,
                                              Monitor::_safepoint_check_sometimes);

  assert(monitor() != NULL, "Failed to allocate monitor");
}

AbstractGangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
  return new YieldingFlexibleGangWorker(this, which);
}

void YieldingFlexibleWorkGang::internal_worker_poll(YieldingWorkData* data) const {
  assert(data != NULL, "worker data is null");
  data->set_task(task());
  data->set_sequence_number(sequence_number());
}

void YieldingFlexibleWorkGang::internal_note_start() {
  assert(monitor()->owned_by_self(), "note_start is an internal method");
  _started_workers += 1;
}

void YieldingFlexibleWorkGang::internal_note_finish() {
  assert(monitor()->owned_by_self(), "note_finish is an internal method");
  _finished_workers += 1;
}

// Run a task; returns when the task is done, or the workers yield,
// or the task is aborted.
// A task that has been yielded can be continued via this interface
// by using the same task repeatedly as the argument to the call.
// It is expected that the YieldingFlexibleGangTask carries the appropriate
// continuation information used by workers to continue the task
// from its last yield point. Thus, a completed task will return
// immediately with no actual work having been done by the workers.
/////////////////////
// Implementation notes: remove before check-in XXX
/*
A gang is working on at most one task at any given time.
Some subset of workers may have yielded and some may
have finished their quota of work. Until this task has
been completed, the workers are bound to that task.
Once the task has been completed, the gang unbinds
itself from the task.

The yielding work gang thus exports two invocation
interfaces: run_task() and continue_task(). The
first is used to initiate a new task and bind it
to the workers; the second is used to continue an
already bound task that has yielded. Upon completion
the binding is released and a new binding may be
created.

The shape of a yielding work gang is as follows:

Overseer invokes run_task(*task).
   Lock gang monitor
   Check that there is no existing binding for the gang
   If there is, abort with an error
   Else, create a new binding of this gang to the given task
   Set number of active workers (as asked)
   Notify workers that work is ready to be done
     [the requisite # workers would then start up
      and do the task]
   Wait on the monitor until either
     all work is completed or the task has yielded
     -- this is normally done through
        yielded + completed == active
        [completed workers are reset to idle state by overseer?]
   Return appropriate status to caller

Overseer invokes continue_task(*task).
   Lock gang monitor
   Check that task is the same as current binding
   If not, abort with an error
   Else, set the number of active workers as requested?
   Notify workers that they can continue from yield points
    New workers can also start up as required
      while satisfying the constraint that
         active + yielded does not exceed required number
   Wait (as above).

NOTE: In the above, for simplicity in a first iteration
  our gangs will be of fixed population and will not
  therefore be flexible work gangs, just yielding work
  gangs. Once this works well, we will in a second
  iteration/refinement introduce flexibility into
  the work gang.

NOTE: we can always create a new gang per each iteration
  in order to get the flexibility, but for now we will
  forgo that simpler route.

 */
/////////////////////
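// For reference, a minimal sketch of the overseer-side protocol described
// above, assuming a hypothetical concrete YieldingFlexibleGangTask subclass
// (MyYieldingTask below is illustrative only); the yielded()/completed()/
// aborted() status queries are assumed from the task's declaration, and
// run_task() in the notes above corresponds to start_task() below:
//
//   MyYieldingTask tsk(...);          // hypothetical concrete task
//   gang->start_task(&tsk);           // bind and run; returns on yield/finish
//   while (tsk.yielded()) {
//     // ... do whatever foreground work motivated the yield ...
//     gang->continue_task(&tsk);      // resume from the last yield point
//   }
//   // here the task has completed or aborted and the binding is released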
void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() == NULL, "Gang currently tied to a task");
  assert(new_task != NULL, "Null task");
  // Bind task to gang
  _task = new_task;
  new_task->set_gang(this);  // Establish 2-way binding to support yielding
  _sequence_number++;

  uint requested_size = new_task->requested_size();
  if (requested_size != 0) {
    _active_workers = MIN2(requested_size, total_workers());
  } else {
    _active_workers = active_workers();
  }
  new_task->set_actual_size(_active_workers);
  new_task->set_for_termination(_active_workers);

  assert(_started_workers == 0, "Tabula rasa non");
  assert(_finished_workers == 0, "Tabula rasa non");
  assert(_yielded_workers == 0, "Tabula rasa non");
  yielding_task()->set_status(ACTIVE);

  // Wake up all the workers; the first few will get to work,
  // and the rest will go back to sleep.
  monitor()->notify_all();
  wait_for_gang();
}

void YieldingFlexibleWorkGang::wait_for_gang() {

  assert(monitor()->owned_by_self(), "Data race");
  // Wait for task to complete or yield
  for (Status status = yielding_task()->status();
       status != COMPLETED && status != YIELDED && status != ABORTED;
       status = yielding_task()->status()) {
    assert(started_workers() <= active_workers(), "invariant");
    assert(finished_workers() <= active_workers(), "invariant");
    assert(yielded_workers() <= active_workers(), "invariant");
    monitor()->wait(Mutex::_no_safepoint_check_flag);
  }
  switch (yielding_task()->status()) {
    case COMPLETED:
    case ABORTED: {
      assert(finished_workers() == active_workers(), "Inconsistent status");
      assert(yielded_workers() == 0, "Invariant");
      reset();   // for next task; gang<->task binding released
      break;
    }
    case YIELDED: {
      assert(yielded_workers() > 0, "Invariant");
      assert(yielded_workers() + finished_workers() == active_workers(),
             "Inconsistent counts");
      break;
    }
    case ACTIVE:
    case INACTIVE:
    case COMPLETING:
    case YIELDING:
    case ABORTING:
    default:
      ShouldNotReachHere();
  }
}

void YieldingFlexibleWorkGang::continue_task(
  YieldingFlexibleGangTask* gang_task) {

  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() != NULL && task() == gang_task, "Incorrect usage");
  assert(_started_workers == _active_workers, "Precondition");
  assert(_yielded_workers > 0 && yielding_task()->status() == YIELDED,
         "Else why are we calling continue_task()");
  // Restart the yielded gang workers
  yielding_task()->set_status(ACTIVE);
  monitor()->notify_all();
  wait_for_gang();
}

void YieldingFlexibleWorkGang::reset() {
  _started_workers  = 0;
  _finished_workers = 0;
  yielding_task()->set_gang(NULL);
  _task = NULL;    // unbind gang from task
}

void YieldingFlexibleWorkGang::yield() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  if (yielding_task()->status() == ABORTING) {
    // Do not yield; we need to abort as soon as possible
    // XXX NOTE: This can cause a performance pathology in the
    // current implementation in Mustang, as of today, and
    // pre-Mustang in that as soon as an overflow occurs,
    // yields will not be honoured. The right way to proceed
    // of course is to fix bug # TBF, so that aborts cause
    // us to return at each potential yield point.
    return;
  }
  if (++_yielded_workers + finished_workers() == active_workers()) {
    yielding_task()->set_status(YIELDED);
    monitor()->notify_all();
  } else {
    yielding_task()->set_status(YIELDING);
  }

  while (true) {
    switch (yielding_task()->status()) {
      case YIELDING:
      case YIELDED: {
        monitor()->wait(Mutex::_no_safepoint_check_flag);
        break;  // from switch
      }
      case ACTIVE:
      case ABORTING:
      case COMPLETING: {
        assert(_yielded_workers > 0, "Else why am I here?");
        _yielded_workers--;
        return;
      }
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      default: {
        ShouldNotReachHere();
      }
    }
  }
  // The only return is from inside the switch statement above
  ShouldNotReachHere();
}

void YieldingFlexibleWorkGang::abort() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  #ifndef PRODUCT
    switch (yielding_task()->status()) {
      // allowed states
      case ACTIVE:
      case ABORTING:
      case COMPLETING:
      case YIELDING:
        break;
      // not allowed states
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      case YIELDED:
      default:
        ShouldNotReachHere();
    }
  #endif // !PRODUCT
  Status prev_status = yielding_task()->status();
  yielding_task()->set_status(ABORTING);
  if (prev_status == YIELDING) {
    assert(yielded_workers() > 0, "Inconsistency");
    // At least one thread has yielded, wake it up
    // so it can go back to waiting stations ASAP.
    monitor()->notify_all();
  }
}

///////////////////////////////
// YieldingFlexibleGangTask
///////////////////////////////
void YieldingFlexibleGangTask::yield() {
  assert(gang() != NULL, "No gang to signal");
  gang()->yield();
}

void YieldingFlexibleGangTask::abort() {
  assert(gang() != NULL, "No gang to signal");
  gang()->abort();
}
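
// For illustration, a minimal sketch (hypothetical subclass and helpers,
// not part of this file) of how a task's work() method might cooperate
// with yield(): the worker reaches a safe, resumable point and calls
// yield(), which parks it in the gang until the overseer calls
// continue_task().
//
//   class SketchTask : public YieldingFlexibleGangTask {
//   public:
//     SketchTask() : YieldingFlexibleGangTask("Sketch task") {}
//     virtual void work(uint worker_id) {
//       while (claim_next_chunk()) {   // hypothetical work-claiming helper
//         process_chunk();             // hypothetical unit of work
//         if (should_yield()) {        // hypothetical yield-request check
//           yield();                   // blocks here at the yield point
//         }
//       }
//     }
//   };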

///////////////////////////////
// YieldingFlexibleGangWorker
///////////////////////////////
void YieldingFlexibleGangWorker::loop() {
  int previous_sequence_number = 0;
  Monitor* gang_monitor = yf_gang()->monitor();
  MutexLockerEx ml(gang_monitor, Mutex::_no_safepoint_check_flag);
  YieldingWorkData data;
  int id;
  while (true) {
    // Check if there is work to do.
    yf_gang()->internal_worker_poll(&data);
    if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
      // There is work to be done.
      // First check if we need to become active or if there
      // are already the requisite number of workers.
      if (yf_gang()->started_workers() == yf_gang()->active_workers()) {
        // There are already enough workers; we do not need
        // to run. Fall through and wait on the monitor.
      } else {
        // We need to pitch in and do the work.
        assert(yf_gang()->started_workers() < yf_gang()->active_workers(),
               "Unexpected state");
        id = yf_gang()->started_workers();
        yf_gang()->internal_note_start();
        // Now, release the gang mutex and do the work.
        {
          MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
          GCIdMark gc_id_mark(data.task()->gc_id());
          data.task()->work(id);   // This might include yielding
        }
        // Reacquire monitor and note completion of this worker
        yf_gang()->internal_note_finish();
        // Update status of task based on whether all workers have
        // finished or some have yielded.
        assert(data.task() == yf_gang()->task(), "Confused task binding");
        if (yf_gang()->finished_workers() == yf_gang()->active_workers()) {
          switch (data.yf_task()->status()) {
            case ABORTING: {
              data.yf_task()->set_status(ABORTED);
              break;
            }
            case ACTIVE:
            case COMPLETING: {
              data.yf_task()->set_status(COMPLETED);
              break;
            }
            default:
              ShouldNotReachHere();
          }
          gang_monitor->notify_all();  // Notify overseer
        } else { // at least one worker is still working or yielded
          assert(yf_gang()->finished_workers() < yf_gang()->active_workers(),
                 "Counts inconsistent");
          switch (data.yf_task()->status()) {
            case ACTIVE: {
              // first, but not the only, thread to complete
              data.yf_task()->set_status(COMPLETING);
              break;
            }
            case YIELDING: {
              if (yf_gang()->finished_workers() + yf_gang()->yielded_workers()
                  == yf_gang()->active_workers()) {
                data.yf_task()->set_status(YIELDED);
                gang_monitor->notify_all();  // notify overseer
              }
              break;
            }
            case ABORTING:
            case COMPLETING: {
              break; // nothing to do
            }
            default: // everything else: INACTIVE, YIELDED, ABORTED, COMPLETED
              ShouldNotReachHere();
          }
        }
      }
    }
    // Remember the sequence number
    previous_sequence_number = data.sequence_number();
    // Wait for more work
    gang_monitor->wait(Mutex::_no_safepoint_check_flag);
  }
}