/*
 * Copyright (c) 2005, 2010 Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_yieldingWorkgroup.cpp.incl"

// Forward declaration of classes declared here.

class GangWorker;
class WorkData;

YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
  const char* name, int workers, bool are_GC_task_threads) :
  FlexibleWorkGang(name, workers, are_GC_task_threads, false),
    _yielded_workers(0) {}

GangWorker* YieldingFlexibleWorkGang::allocate_worker(int which) {
  YieldingFlexibleGangWorker* new_member =
      new YieldingFlexibleGangWorker(this, which);
  return new_member;
}

// Run a task; returns when the task is done, or the workers yield,
// or the task is aborted, or the work gang is terminated via stop().
// A task that has been yielded can be continued via this interface
// by using the same task repeatedly as the argument to the call.
// It is expected that the YieldingFlexibleGangTask carries the appropriate
// continuation information used by workers to continue the task
// from its last yield point. Thus, a completed task will return
// immediately with no actual work having been done by the workers.
/////////////////////
// Implementation notes: remove before check-in XXX
/*
Each gang is working on a task at a certain time.
Some subset of workers may have yielded and some may
have finished their quota of work. Until this task has
been completed, the workers are bound to that task.
Once the task has been completed, the gang unbinds
itself from the task.

The yielding work gang thus exports two invocation
interfaces: run_task() and continue_task(). The
first is used to initiate a new task and bind it
to the workers; the second is used to continue an
already bound task that has yielded. Upon completion
the binding is released and a new binding may be
created.

The operation of a yielding work gang is as follows:

Overseer invokes run_task(*task).
   Lock gang monitor
   Check whether there is an existing binding for the gang
   If there is one, abort with an error
   Else, create a new binding of this gang to the given task
   Set number of active workers (as asked)
   Notify workers that work is ready to be done
     [the requisite # workers would then start up
      and do the task]
   Wait on the monitor until either
     all work is completed or the task has yielded
     -- this is normally done through
        yielded + completed == active
        [completed workers are reset to idle state by overseer?]
   Return appropriate status to caller

Overseer invokes continue_task(*task),
   Lock gang monitor
   Check that task is the same as current binding
   If not, abort with an error
   Else, set the number of active workers as requested?
   Notify workers that they can continue from yield points
    New workers can also start up as required
      while satisfying the constraint that
         active + yielded does not exceed required number
   Wait (as above).

NOTE: In the above, for simplicity in a first iteration
  our gangs will be of fixed population and will not
  therefore be flexible work gangs, just yielding work
  gangs. Once this works well, we will in a second
  iteration/refinement introduce flexibility into
  the work gang.

NOTE: We can always create a new gang for each iteration
  in order to get the flexibility, but for now we will
  avoid that simplified route.

 */
/////////////////////
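// Illustrative overseer-side usage (a sketch only, not code from this file):
// `gang` is a YieldingFlexibleWorkGang*, `MarkTask` is a hypothetical
// YieldingFlexibleGangTask subclass, and completed()/aborted() are assumed
// status accessors on the task.
//
//   MarkTask tsk(/* ... */);
//   gang->run_task(&tsk);                  // bind the task to the gang and start it
//   while (!tsk.completed() && !tsk.aborted()) {
//     // the workers yielded; do any foreground work here, then resume the
//     // task from its last yield point
//     gang->continue_task(&tsk);
//   }
//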
void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() == NULL, "Gang currently tied to a task");
  assert(new_task != NULL, "Null task");
  // Bind task to gang
  _task = new_task;
  new_task->set_gang(this);  // Establish 2-way binding to support yielding
  _sequence_number++;

  int requested_size = new_task->requested_size();
  assert(requested_size >= 0, "Should be non-negative");
  if (requested_size != 0) {
    _active_workers = MIN2(requested_size, total_workers());
  } else {
    _active_workers = total_workers();
  }
  new_task->set_actual_size(_active_workers);
  new_task->set_for_termination(_active_workers);

  assert(_started_workers == 0, "Tabula rasa non");
  assert(_finished_workers == 0, "Tabula rasa non");
  assert(_yielded_workers == 0, "Tabula rasa non");
  yielding_task()->set_status(ACTIVE);

  // Wake up all the workers; the first few will get to work,
  // and the rest will go back to sleep.
  monitor()->notify_all();
  wait_for_gang();
}

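// Wait on the gang monitor until the currently bound task completes, yields,
// or is aborted; on completion or abort, release the gang<->task binding.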
void YieldingFlexibleWorkGang::wait_for_gang() {
  assert(monitor()->owned_by_self(), "Data race");
  // Wait for task to complete, yield or abort
  for (Status status = yielding_task()->status();
       status != COMPLETED && status != YIELDED && status != ABORTED;
       status = yielding_task()->status()) {
    assert(started_workers() <= total_workers(), "invariant");
    assert(finished_workers() <= total_workers(), "invariant");
    assert(yielded_workers() <= total_workers(), "invariant");
    monitor()->wait(Mutex::_no_safepoint_check_flag);
  }
  switch (yielding_task()->status()) {
    case COMPLETED:
    case ABORTED: {
      assert(finished_workers() == total_workers(), "Inconsistent status");
      assert(yielded_workers() == 0, "Invariant");
      reset();   // for next task; gang<->task binding released
      break;
    }
    case YIELDED: {
      assert(yielded_workers() > 0, "Invariant");
      assert(yielded_workers() + finished_workers() == total_workers(),
             "Inconsistent counts");
      break;
    }
    case ACTIVE:
    case INACTIVE:
    case COMPLETING:
    case YIELDING:
    case ABORTING:
    default:
      ShouldNotReachHere();
  }
}

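// Resume a previously bound task that has yielded: mark it ACTIVE again,
// wake the yielded workers, and wait for completion, another yield, or abort.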
void YieldingFlexibleWorkGang::continue_task(
  YieldingFlexibleGangTask* gang_task) {

  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() != NULL && task() == gang_task, "Incorrect usage");
  // assert(_active_workers == total_workers(), "For now");
  assert(_started_workers == _active_workers, "Precondition");
  assert(_yielded_workers > 0 && yielding_task()->status() == YIELDED,
         "Else why are we calling continue_task()");
  // Restart the yielded gang workers
  yielding_task()->set_status(ACTIVE);
  monitor()->notify_all();
  wait_for_gang();
}

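// Release the gang<->task binding and clear the per-task worker counts
// so that a new task may be bound.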
void YieldingFlexibleWorkGang::reset() {
  _started_workers  = 0;
  _finished_workers = 0;
  yielding_task()->set_gang(NULL);
  _task = NULL;    // unbind gang from task
}

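// Called by a worker (via the task) at a yield point. The last worker to
// yield or finish flips the task status to YIELDED and notifies the overseer;
// the worker then blocks on the gang monitor until the task is continued,
// aborted, or completing, at which point it returns and resumes from where
// it yielded.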
void YieldingFlexibleWorkGang::yield() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < total_workers(), "Consistency check");
  if (yielding_task()->status() == ABORTING) {
    // Do not yield; we need to abort as soon as possible.
    // XXX NOTE: This can cause a performance pathology in the current
    // (Mustang and pre-Mustang) implementation: as soon as an overflow
    // occurs, yields will not be honoured. The right way to proceed,
    // of course, is to fix bug # TBF, so that aborts cause us to
    // return at each potential yield point.
    return;
  }
  if (++_yielded_workers + finished_workers() == total_workers()) {
    yielding_task()->set_status(YIELDED);
    monitor()->notify_all();
  } else {
    yielding_task()->set_status(YIELDING);
  }

  while (true) {
    switch (yielding_task()->status()) {
      case YIELDING:
      case YIELDED: {
        monitor()->wait(Mutex::_no_safepoint_check_flag);
        break;  // from switch
      }
      case ACTIVE:
      case ABORTING:
      case COMPLETING: {
        assert(_yielded_workers > 0, "Else why am I here?");
        _yielded_workers--;
        return;
      }
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      default: {
        ShouldNotReachHere();
      }
    }
  }
  // Only return is from inside switch statement above
  ShouldNotReachHere();
}

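// Called (via the task) when the task is to be given up: mark the task
// ABORTING so that workers return at their next abort check, and, if some
// workers have already yielded, wake them so they can unwind promptly.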
void YieldingFlexibleWorkGang::abort() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  #ifndef PRODUCT
    switch (yielding_task()->status()) {
      // allowed states
      case ACTIVE:
      case ABORTING:
      case COMPLETING:
      case YIELDING:
        break;
      // not allowed states
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      case YIELDED:
      default:
        ShouldNotReachHere();
    }
  #endif // !PRODUCT
  Status prev_status = yielding_task()->status();
  yielding_task()->set_status(ABORTING);
  if (prev_status == YIELDING) {
    assert(yielded_workers() > 0, "Inconsistency");
    // At least one thread has yielded; wake it up
    // so it can get back to its waiting station ASAP.
    monitor()->notify_all();
  }
}

///////////////////////////////
// YieldingFlexibleGangTask
///////////////////////////////
void YieldingFlexibleGangTask::yield() {
  assert(gang() != NULL, "No gang to signal");
  gang()->yield();
}

void YieldingFlexibleGangTask::abort() {
  assert(gang() != NULL, "No gang to signal");
  gang()->abort();
}
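
// Sketch of how a concrete task's work() method might cooperate with these
// hooks (illustrative only; do_chunk_of_work() and should_yield() are
// hypothetical helpers a subclass would provide):
//
//   void MyYieldingTask::work(int i) {
//     while (do_chunk_of_work(i)) {   // returns false when worker i is done
//       if (should_yield()) {
//         yield();                    // park at a yield point; execution resumes
//       }                             // here when the overseer continues the task
//     }
//   }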

///////////////////////////////
// YieldingFlexibleGangWorker
///////////////////////////////
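// Each worker thread executes this loop: wait on the gang monitor for a new
// (or continued) task or a termination request; if work is available and more
// workers are needed, claim a worker id, run the task's work(id) with the
// monitor released, and, on returning, update the task's status depending on
// whether this worker was the last one to finish for the current round.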
void YieldingFlexibleGangWorker::loop() {
  int previous_sequence_number = 0;
  Monitor* gang_monitor = gang()->monitor();
  MutexLockerEx ml(gang_monitor, Mutex::_no_safepoint_check_flag);
  WorkData data;
  int id;
  while (true) {
    // Check if there is work to do or if we have been asked
    // to terminate
    gang()->internal_worker_poll(&data);
    if (data.terminate()) {
      // We have been asked to terminate.
      assert(gang()->task() == NULL, "No task binding");
      // set_status(TERMINATED);
      return;
    } else if (data.task() != NULL &&
               data.sequence_number() != previous_sequence_number) {
      // There is work to be done.
      // First check if we need to become active or if there
      // are already the requisite number of workers
      if (gang()->started_workers() == yf_gang()->active_workers()) {
        // There are already enough workers; we do not need
        // to run; fall through and wait on monitor.
      } else {
        // We need to pitch in and do the work.
        assert(gang()->started_workers() < yf_gang()->active_workers(),
               "Unexpected state");
        id = gang()->started_workers();
        gang()->internal_note_start();
        // Now, release the gang mutex and do the work.
        {
          MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
          data.task()->work(id);   // This might include yielding
        }
        // Reacquire monitor and note completion of this worker
        gang()->internal_note_finish();
        // Update status of task based on whether all workers have
        // finished or some have yielded
        assert(data.task() == gang()->task(), "Confused task binding");
        if (gang()->finished_workers() == yf_gang()->active_workers()) {
          switch (data.yf_task()->status()) {
            case ABORTING: {
              data.yf_task()->set_status(ABORTED);
              break;
            }
            case ACTIVE:
            case COMPLETING: {
              data.yf_task()->set_status(COMPLETED);
              break;
            }
            default:
              ShouldNotReachHere();
          }
          gang_monitor->notify_all();  // Notify overseer
        } else { // at least one worker is still working or yielded
          assert(gang()->finished_workers() < yf_gang()->active_workers(),
                 "Counts inconsistent");
          switch (data.yf_task()->status()) {
            case ACTIVE: {
              // first, but not the only, worker to complete
              data.yf_task()->set_status(COMPLETING);
              break;
            }
            case YIELDING: {
              if (gang()->finished_workers() + yf_gang()->yielded_workers()
                  == yf_gang()->active_workers()) {
                data.yf_task()->set_status(YIELDED);
                gang_monitor->notify_all();  // notify overseer
              }
              break;
            }
            case ABORTING:
            case COMPLETING: {
              break; // nothing to do
            }
            default: // everything else: INACTIVE, YIELDED, ABORTED, COMPLETED
              ShouldNotReachHere();
          }
        }
      }
    }
    // Remember the sequence number
    previous_sequence_number = data.sequence_number();
    // Wait for more work
    gang_monitor->wait(Mutex::_no_safepoint_check_flag);
  }
}