1 /*
   2  * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/yieldingWorkgroup.hpp"
  27 #include "utilities/macros.hpp"
  28 
// A gang worker for a YieldingFlexibleWorkGang; all construction-time
// state is handled by the AbstractGangWorker base class.
YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id)
    : AbstractGangWorker(gang, id) {}
  31 
// Construct a yielding, flexible work gang of 'workers' workers.
// The gang starts unbound (no task) with all per-task worker counts
// (started/finished/yielded) at zero.
// NOTE(review): the trailing 'false' to AbstractWorkGang is presumably
// are_ConcurrentGC_threads -- confirm against AbstractWorkGang's ctor.
YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
    const char* name, uint workers, bool are_GC_task_threads) :
         AbstractWorkGang(name, workers, are_GC_task_threads, false),
         _yielded_workers(0),
         _started_workers(0),
         _finished_workers(0),
         _sequence_number(0),
         _task(NULL) {

  // Other initialization.
  // This monitor serializes every gang<->overseer hand-off in this
  // file: task binding, worker start/finish/yield accounting, and all
  // wait/notify_all wakeups.
  _monitor = new Monitor(/* priority */       Mutex::leaf,
                         /* name */           "WorkGroup monitor",
                         /* allow_vm_block */ are_GC_task_threads,
                                              Monitor::_safepoint_check_sometimes);

  assert(monitor() != NULL, "Failed to allocate monitor");
}
  49 
  50 AbstractGangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
  51   return new YieldingFlexibleGangWorker(this, which);
  52 }
  53 
  54 void YieldingFlexibleWorkGang::internal_worker_poll(YieldingWorkData* data) const {
  55   assert(data != NULL, "worker data is null");
  56   data->set_task(task());
  57   data->set_sequence_number(sequence_number());
  58 }
  59 
  60 void YieldingFlexibleWorkGang::internal_note_start() {
  61   assert(monitor()->owned_by_self(), "note_finish is an internal method");
  62   _started_workers += 1;
  63 }
  64 
  65 void YieldingFlexibleWorkGang::internal_note_finish() {
  66   assert(monitor()->owned_by_self(), "note_finish is an internal method");
  67   _finished_workers += 1;
  68 }
  69 
  70 // Run a task; returns when the task is done, or the workers yield,
  71 // or the task is aborted.
  72 // A task that has been yielded can be continued via this interface
  73 // by using the same task repeatedly as the argument to the call.
  74 // It is expected that the YieldingFlexibleGangTask carries the appropriate
  75 // continuation information used by workers to continue the task
  76 // from its last yield point. Thus, a completed task will return
  77 // immediately with no actual work having been done by the workers.
  78 /////////////////////
// Implementation notes: remove before check-in XXX
  80 /*
  81 Each gang is working on a task at a certain time.
  82 Some subset of workers may have yielded and some may
  83 have finished their quota of work. Until this task has
  84 been completed, the workers are bound to that task.
Once the task has been completed, the gang unbinds
itself from the task.
  87 
The yielding work gang thus exports two invocation
  89 interfaces: run_task() and continue_task(). The
  90 first is used to initiate a new task and bind it
  91 to the workers; the second is used to continue an
  92 already bound task that has yielded. Upon completion
  93 the binding is released and a new binding may be
  94 created.
  95 
  96 The shape of a yielding work gang is as follows:
  97 
  98 Overseer invokes run_task(*task).
  99    Lock gang monitor
 100    Check that there is no existing binding for the gang
 101    If so, abort with an error
 102    Else, create a new binding of this gang to the given task
 103    Set number of active workers (as asked)
 104    Notify workers that work is ready to be done
 105      [the requisite # workers would then start up
 106       and do the task]
 107    Wait on the monitor until either
 108      all work is completed or the task has yielded
 109      -- this is normally done through
 110         yielded + completed == active
        [completed workers are reset to idle state by overseer?]
 112    return appropriate status to caller
 113 
 114 Overseer invokes continue_task(*task),
 115    Lock gang monitor
 116    Check that task is the same as current binding
 117    If not, abort with an error
 118    Else, set the number of active workers as requested?
 119    Notify workers that they can continue from yield points
 120     New workers can also start up as required
 121       while satisfying the constraint that
 122          active + yielded does not exceed required number
 123    Wait (as above).
 124 
 125 NOTE: In the above, for simplicity in a first iteration
 126   our gangs will be of fixed population and will not
 127   therefore be flexible work gangs, just yielding work
  gangs. Once this works well, we will in a second
  iteration/refinement introduce flexibility into
  the work gang.
 131 
NOTE: we can always create a new gang per each iteration
  in order to get the flexibility, but we will for now
  refrain from taking that simplified route.
 135 
 136  */
 137 /////////////////////
// Run 'new_task' on this gang: bind the task, size the active worker
// set, wake the workers, and block (in wait_for_gang()) until the task
// completes, aborts, or yields.
// Precondition: the gang is not currently bound to any task.
void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() == NULL, "Gang currently tied to a task");
  assert(new_task != NULL, "Null task");
  // Bind task to gang
  _task = new_task;
  new_task->set_gang(this);  // Establish 2-way binding to support yielding
  // Bump the sequence number so sleeping workers can tell this is a
  // new task rather than the one they last polled (see loop()).
  _sequence_number++;

  // Size the active worker set: honor the task's request, capped at
  // the gang's total size; a request of 0 keeps the current setting.
  uint requested_size = new_task->requested_size();
  if (requested_size != 0) {
    _active_workers = MIN2(requested_size, total_workers());
  } else {
    _active_workers = active_workers();
  }
  new_task->set_actual_size(_active_workers);
  new_task->set_for_termination(_active_workers);

  // Per-task counters must start from a clean slate (see reset()).
  assert(_started_workers == 0, "Tabula rasa non");
  assert(_finished_workers == 0, "Tabula rasa non");
  assert(_yielded_workers == 0, "Tabula rasa non");
  yielding_task()->set_status(ACTIVE);

  // Wake up all the workers, the first few will get to work,
  // and the rest will go back to sleep
  monitor()->notify_all();
  wait_for_gang();
}
 166 
// Called with the gang monitor held: block the overseer until the
// bound task reaches COMPLETED, ABORTED, or YIELDED, then tidy up.
// On COMPLETED/ABORTED the gang<->task binding is released; on
// YIELDED it is kept so continue_task() can resume the task.
void YieldingFlexibleWorkGang::wait_for_gang() {

  assert(monitor()->owned_by_self(), "Data race");
  // Wait for task to complete or yield
  for (Status status = yielding_task()->status();
       status != COMPLETED && status != YIELDED && status != ABORTED;
       status = yielding_task()->status()) {
    assert(started_workers() <= active_workers(), "invariant");
    assert(finished_workers() <= active_workers(), "invariant");
    assert(yielded_workers() <= active_workers(), "invariant");
    monitor()->wait(Mutex::_no_safepoint_check_flag);
  }
  switch (yielding_task()->status()) {
    case COMPLETED:
    case ABORTED: {
      // Every active worker has finished; release the binding so a
      // new task can be started.
      assert(finished_workers() == active_workers(), "Inconsistent status");
      assert(yielded_workers() == 0, "Invariant");
      reset();   // for next task; gang<->task binding released
      break;
    }
    case YIELDED: {
      // Workers are parked in yield(); keep the binding for
      // continue_task().
      assert(yielded_workers() > 0, "Invariant");
      assert(yielded_workers() + finished_workers() == active_workers(),
             "Inconsistent counts");
      break;
    }
    case ACTIVE:
    case INACTIVE:
    case COMPLETING:
    case YIELDING:
    case ABORTING:
    default:
      // The wait loop above only exits in one of the three states
      // handled before this point.
      ShouldNotReachHere();
  }
}
 202 
// Resume a previously yielded task bound to this gang: flip it back to
// ACTIVE, wake the parked workers, and block until the task again
// completes, aborts, or yields.
// Precondition: 'gang_task' is the task currently bound to this gang
// and is in the YIELDED state.
void YieldingFlexibleWorkGang::continue_task(
  YieldingFlexibleGangTask* gang_task) {

  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() != NULL && task() == gang_task, "Incorrect usage");
  assert(_started_workers == _active_workers, "Precondition");
  assert(_yielded_workers > 0 && yielding_task()->status() == YIELDED,
         "Else why are we calling continue_task()");
  // Restart the yielded gang workers
  yielding_task()->set_status(ACTIVE);
  monitor()->notify_all();
  wait_for_gang();
}
 216 
 217 void YieldingFlexibleWorkGang::reset() {
 218   _started_workers  = 0;
 219   _finished_workers = 0;
 220   yielding_task()->set_gang(NULL);
 221   _task = NULL;    // unbind gang from task
 222 }
 223 
// Called by a worker from inside the task's work() method when the
// task wants to yield. The worker parks here until the overseer
// continues (continue_task()) or aborts (abort()) the task.
void YieldingFlexibleWorkGang::yield() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  if (yielding_task()->status() == ABORTING) {
    // Do not yield; we need to abort as soon as possible
    // XXX NOTE: This can cause a performance pathology in the
    // current implementation in Mustang, as of today, and
    // pre-Mustang in that as soon as an overflow occurs,
    // yields will not be honoured. The right way to proceed
    // of course is to fix bug # TBF, so that abort's cause
    // us to return at each potential yield point.
    return;
  }
  // Count ourselves among the yielded. If every active worker has now
  // either yielded or finished, the round is over: mark the task
  // YIELDED and wake the overseer blocked in wait_for_gang().
  if (++_yielded_workers + finished_workers() == active_workers()) {
    yielding_task()->set_status(YIELDED);
    monitor()->notify_all();
  } else {
    yielding_task()->set_status(YIELDING);
  }

  // Park until the task's status changes out of YIELDING/YIELDED.
  while (true) {
    switch (yielding_task()->status()) {
      case YIELDING:
      case YIELDED: {
        monitor()->wait(Mutex::_no_safepoint_check_flag);
        break;  // from switch
      }
      case ACTIVE:
      case ABORTING:
      case COMPLETING: {
        // The task is moving again (or winding down): stop counting
        // ourselves as yielded and return into the work() method.
        assert(_yielded_workers > 0, "Else why am i here?");
        _yielded_workers--;
        return;
      }
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      default: {
        // Terminal states are unreachable while a worker is parked:
        // the task cannot finish with this worker still yielded.
        ShouldNotReachHere();
      }
    }
  }
  // Only return is from inside switch statement above
  ShouldNotReachHere();
}
 270 
// Request that the bound task abort: move it to ABORTING so workers
// return promptly from yield points, and wake any already-yielded
// workers so they can unwind.
void YieldingFlexibleWorkGang::abort() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  #ifndef PRODUCT
    // Sanity check (debug builds only): abort is only legal while the
    // task is in flight.
    switch (yielding_task()->status()) {
      // allowed states
      case ACTIVE:
      case ABORTING:
      case COMPLETING:
      case YIELDING:
        break;
      // not allowed states
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      case YIELDED:
      default:
        ShouldNotReachHere();
    }
  #endif // !PRODUCT
  Status prev_status = yielding_task()->status();
  yielding_task()->set_status(ABORTING);
  if (prev_status == YIELDING) {
    assert(yielded_workers() > 0, "Inconsistency");
    // At least one thread has yielded, wake it up
    // so it can go back to waiting stations ASAP.
    monitor()->notify_all();
  }
}
 301 
 302 ///////////////////////////////
 303 // YieldingFlexibleGangTask
 304 ///////////////////////////////
// Called from within the task's work() method; forwards the yield
// request to the gang this task is currently bound to.
void YieldingFlexibleGangTask::yield() {
  assert(gang() != NULL, "No gang to signal");
  gang()->yield();
}
 309 
// Forwards the abort request to the bound gang, which moves this task
// to ABORTING and wakes any yielded workers.
void YieldingFlexibleGangTask::abort() {
  assert(gang() != NULL, "No gang to signal");
  gang()->abort();
}
 314 
 315 ///////////////////////////////
 316 // YieldingFlexibleGangWorker
 317 ///////////////////////////////
// Worker main loop: sleep on the gang monitor until a new task shows
// up (detected via the gang's sequence number), claim a worker id, run
// the task's work() method with the monitor released, then publish
// completion/yield status back to the gang and the waiting overseer.
// Never returns.
void YieldingFlexibleGangWorker::loop() {
  int previous_sequence_number = 0;
  Monitor* gang_monitor = yf_gang()->monitor();
  // The monitor is held throughout, except while running work() below.
  MutexLockerEx ml(gang_monitor, Mutex::_no_safepoint_check_flag);
  YieldingWorkData data;
  int id;
  while (true) {
    // Check if there is work to do.
    yf_gang()->internal_worker_poll(&data);
    if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
      // There is work to be done.
      // First check if we need to become active or if there
      // are already the requisite number of workers
      if (yf_gang()->started_workers() == yf_gang()->active_workers()) {
        // There are already enough workers; we do not need
        // to run; fall through and wait on monitor.
      } else {
        // We need to pitch in and do the work.
        assert(yf_gang()->started_workers() < yf_gang()->active_workers(),
               "Unexpected state");
        // Our id is the number of workers started so far; claimed
        // under the monitor, so ids are unique per task.
        id = yf_gang()->started_workers();
        yf_gang()->internal_note_start();
        // Now, release the gang mutex and do the work.
        {
          MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
          data.task()->work(id);   // This might include yielding
        }
        // Reacquire monitor and note completion of this worker
        yf_gang()->internal_note_finish();
        // Update status of task based on whether all workers have
        // finished or some have yielded
        assert(data.task() == yf_gang()->task(), "Confused task binding");
        if (yf_gang()->finished_workers() == yf_gang()->active_workers()) {
          // We are the last active worker to finish: move the task to
          // its terminal state and wake the overseer in wait_for_gang().
          switch (data.yf_task()->status()) {
            case ABORTING: {
              data.yf_task()->set_status(ABORTED);
              break;
            }
            case ACTIVE:
            case COMPLETING: {
              data.yf_task()->set_status(COMPLETED);
              break;
            }
            default:
              ShouldNotReachHere();
          }
          gang_monitor->notify_all();  // Notify overseer
        } else { // at least one worker is still working or yielded
          assert(yf_gang()->finished_workers() < yf_gang()->active_workers(),
                 "Counts inconsistent");
          switch (data.yf_task()->status()) {
            case ACTIVE: {
              // first, but not only thread to complete
              data.yf_task()->set_status(COMPLETING);
              break;
            }
            case YIELDING: {
              // If everyone else has already yielded, our finishing
              // tips the round over into YIELDED; wake the overseer.
              if (yf_gang()->finished_workers() + yf_gang()->yielded_workers()
                  == yf_gang()->active_workers()) {
                data.yf_task()->set_status(YIELDED);
                gang_monitor->notify_all();  // notify overseer
              }
              break;
            }
            case ABORTING:
            case COMPLETING: {
              break; // nothing to do
            }
            default: // everything else: INACTIVE, YIELDED, ABORTED, COMPLETED
              ShouldNotReachHere();
          }
        }
      }
    }
    // Remember the sequence number
    previous_sequence_number = data.sequence_number();
    // Wait for more work
    gang_monitor->wait(Mutex::_no_safepoint_check_flag);
  }
}