/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_workgroup.cpp.incl"

// Definitions of WorkGang methods.

AbstractWorkGang::AbstractWorkGang(const char* name,
                                   bool are_GC_task_threads,
                                   bool are_ConcurrentGC_threads) :
  _name(name),
  _are_GC_task_threads(are_GC_task_threads),
  _are_ConcurrentGC_threads(are_ConcurrentGC_threads) {

  assert(!(are_GC_task_threads && are_ConcurrentGC_threads),
         "They cannot both be STW GC and Concurrent threads");

  // Other initialization.
  _monitor = new Monitor(/* priority */       Mutex::leaf,
                         /* name */           "WorkGroup monitor",
                         /* allow_vm_block */ are_GC_task_threads);
  assert(monitor() != NULL, "Failed to allocate monitor");
  _terminate = false;
  _task = NULL;
  _sequence_number = 0;
  _started_workers = 0;
  _finished_workers = 0;
}

WorkGang::WorkGang(const char* name,
                   int         workers,
                   bool        are_GC_task_threads,
                   bool        are_ConcurrentGC_threads) :
  AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads) {
  // Save arguments.
  _total_workers = workers;
}

GangWorker* WorkGang::allocate_worker(int which) {
  GangWorker* new_worker = new GangWorker(this, which);
  return new_worker;
}

// The current implementation will exit if the allocation
// of any worker fails.  Still, return a boolean so that
// a future implementation can possibly do a partial
// initialization of the workers and report such to the
// caller.
bool WorkGang::initialize_workers() {

  if (TraceWorkGang) {
    tty->print_cr("Constructing work gang %s with %d threads",
                  name(),
                  total_workers());
  }
  _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, total_workers());
  if (gang_workers() == NULL) {
    vm_exit_out_of_memory(0, "Cannot create GangWorker array.");
    return false;
  }
  os::ThreadType worker_type;
  if (are_ConcurrentGC_threads()) {
    worker_type = os::cgc_thread;
  } else {
    worker_type = os::pgc_thread;
  }
  for (int worker = 0; worker < total_workers(); worker += 1) {
    GangWorker* new_worker = allocate_worker(worker);
    assert(new_worker != NULL, "Failed to allocate GangWorker");
    _gang_workers[worker] = new_worker;
    if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
      vm_exit_out_of_memory(0, "Cannot create worker GC thread. Out of system resources.");
      return false;
    }
    if (!DisableStartThread) {
      os::start_thread(new_worker);
    }
  }
  return true;
}

AbstractWorkGang::~AbstractWorkGang() {
  if (TraceWorkGang) {
    tty->print_cr("Destructing work gang %s", name());
  }
  stop();   // stop all the workers
  for (int worker = 0; worker < total_workers(); worker += 1) {
    delete gang_worker(worker);
  }
  delete gang_workers();
  delete monitor();
}

GangWorker* AbstractWorkGang::gang_worker(int i) const {
  // Array index bounds checking.
  GangWorker* result = NULL;
  assert(gang_workers() != NULL, "No workers for indexing");
  assert(((i >= 0) && (i < total_workers())), "Worker index out of bounds");
  result = _gang_workers[i];
  assert(result != NULL, "Indexing to null worker");
  return result;
}

void WorkGang::run_task(AbstractGangTask* task) {
  // This method is executed by the VM thread, which does not block
  // on ordinary MutexLocker's.
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceWorkGang) {
    tty->print_cr("Running work gang %s task %s", name(), task->name());
  }
  // Tell all the workers to run a task.
  assert(task != NULL, "Running a null task");
  // Initialize.
  _task = task;
  _sequence_number += 1;
  _started_workers = 0;
  _finished_workers = 0;
  // Tell the workers to get to work.
  monitor()->notify_all();
  // Wait for them to be finished.
  while (finished_workers() < total_workers()) {
    if (TraceWorkGang) {
      tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d",
                    name(), finished_workers(), total_workers(),
                    _sequence_number);
    }
    monitor()->wait(/* no_safepoint_check */ true);
  }
  _task = NULL;
  if (TraceWorkGang) {
    tty->print_cr("\nFinished work gang %s: %d/%d sequence %d",
                  name(), finished_workers(), total_workers(),
                  _sequence_number);
  }
}
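// A minimal usage sketch for a WorkGang client (illustrative only;
// MyParallelTask and the gang pointer are hypothetical, not part of this
// file).  A client defines an AbstractGangTask whose work(int) method does
// one worker's share of the job, then hands it to the gang:
//
//   class MyParallelTask : public AbstractGangTask {
//   public:
//     MyParallelTask() : AbstractGangTask("My parallel task") {}
//     virtual void work(int worker_id) {
//       // Do the portion of the work assigned to worker_id.
//     }
//   };
//
//   MyParallelTask task;
//   gang->run_task(&task);   // Blocks until every worker has finished.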
void AbstractWorkGang::stop() {
  // Tell all workers to terminate, then wait for them to become inactive.
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceWorkGang) {
    tty->print_cr("Stopping work gang %s task %s", name(), task()->name());
  }
  _task = NULL;
  _terminate = true;
  monitor()->notify_all();
  while (finished_workers() < total_workers()) {
    if (TraceWorkGang) {
      tty->print_cr("Waiting in work gang %s: %d/%d finished",
                    name(), finished_workers(), total_workers());
    }
    monitor()->wait(/* no_safepoint_check */ true);
  }
}

void AbstractWorkGang::internal_worker_poll(WorkData* data) const {
  assert(monitor()->owned_by_self(), "worker_poll is an internal method");
  assert(data != NULL, "worker data is null");
  data->set_terminate(terminate());
  data->set_task(task());
  data->set_sequence_number(sequence_number());
}

void AbstractWorkGang::internal_note_start() {
  assert(monitor()->owned_by_self(), "note_start is an internal method");
  _started_workers += 1;
}

void AbstractWorkGang::internal_note_finish() {
  assert(monitor()->owned_by_self(), "note_finish is an internal method");
  _finished_workers += 1;
}

void AbstractWorkGang::print_worker_threads_on(outputStream* st) const {
  uint num_thr = total_workers();
  for (uint i = 0; i < num_thr; i++) {
    gang_worker(i)->print_on(st);
    st->cr();
  }
}

void AbstractWorkGang::threads_do(ThreadClosure* tc) const {
  assert(tc != NULL, "Null ThreadClosure");
  uint num_thr = total_workers();
  for (uint i = 0; i < num_thr; i++) {
    tc->do_thread(gang_worker(i));
  }
}

// GangWorker methods.

GangWorker::GangWorker(AbstractWorkGang* gang, uint id) {
  _gang = gang;
  set_id(id);
  set_name("Gang worker#%d (%s)", id, gang->name());
}

void GangWorker::run() {
  initialize();
  loop();
}

void GangWorker::initialize() {
  this->initialize_thread_local_storage();
  assert(_gang != NULL, "No gang to run in");
  os::set_priority(this, NearMaxPriority);
  if (TraceWorkGang) {
    tty->print_cr("Running gang worker for gang %s id %d",
                  gang()->name(), id());
  }
  // The VM thread should not execute here because MutexLocker's are used
  // (as opposed to MutexLockerEx's).
  assert(!Thread::current()->is_VM_thread(), "VM thread should not be part"
         " of a work gang");
}
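// The worker loop below pairs with WorkGang::run_task() above.  Informally
// (this comment is descriptive, not normative): run_task() publishes a task
// and bumps _sequence_number under the gang monitor, then notifies all
// workers.  Each worker wakes up, sees a sequence number different from the
// one it last completed, notes itself as started (its start order becomes
// its "part" of the work), runs the task outside the monitor, then notes
// itself as finished.  run_task() returns once every worker has finished,
// and a worker will not rerun the same task because it remembers the
// sequence number it last completed.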
void GangWorker::loop() {
  int previous_sequence_number = 0;
  Monitor* gang_monitor = gang()->monitor();
  for ( ; /* !terminate() */; ) {
    WorkData data;
    int part;  // Initialized below.
    {
      // Grab the gang mutex.
      MutexLocker ml(gang_monitor);
      // Wait for something to do.
      // Polling outside the while { wait } avoids missed notifies
      // in the outer loop.
      gang()->internal_worker_poll(&data);
      if (TraceWorkGang) {
        tty->print("Polled outside for work in gang %s worker %d",
                   gang()->name(), id());
        tty->print("  terminate: %s",
                   data.terminate() ? "true" : "false");
        tty->print("  sequence: %d (prev: %d)",
                   data.sequence_number(), previous_sequence_number);
        if (data.task() != NULL) {
          tty->print("  task: %s", data.task()->name());
        } else {
          tty->print("  task: NULL");
        }
        tty->cr();
      }
      for ( ; /* break or return */; ) {
        // Terminate if requested.
        if (data.terminate()) {
          gang()->internal_note_finish();
          gang_monitor->notify_all();
          return;
        }
        // Check for new work.
        if ((data.task() != NULL) &&
            (data.sequence_number() != previous_sequence_number)) {
          gang()->internal_note_start();
          gang_monitor->notify_all();
          part = gang()->started_workers() - 1;
          break;
        }
        // Nothing to do.
        gang_monitor->wait(/* no_safepoint_check */ true);
        gang()->internal_worker_poll(&data);
        if (TraceWorkGang) {
          tty->print("Polled inside for work in gang %s worker %d",
                     gang()->name(), id());
          tty->print("  terminate: %s",
                     data.terminate() ? "true" : "false");
          tty->print("  sequence: %d (prev: %d)",
                     data.sequence_number(), previous_sequence_number);
          if (data.task() != NULL) {
            tty->print("  task: %s", data.task()->name());
          } else {
            tty->print("  task: NULL");
          }
          tty->cr();
        }
      }
      // Drop gang mutex.
    }
    if (TraceWorkGang) {
      tty->print("Work for work gang %s id %d task %s part %d",
                 gang()->name(), id(), data.task()->name(), part);
    }
    assert(data.task() != NULL, "Got null task");
    data.task()->work(part);
    {
      if (TraceWorkGang) {
        tty->print("Finish for work gang %s id %d task %s part %d",
                   gang()->name(), id(), data.task()->name(), part);
      }
      // Grab the gang mutex.
      MutexLocker ml(gang_monitor);
      gang()->internal_note_finish();
      // Tell the gang you are done.
      gang_monitor->notify_all();
      // Drop the gang mutex.
    }
    previous_sequence_number = data.sequence_number();
  }
}

bool GangWorker::is_GC_task_thread() const {
  return gang()->are_GC_task_threads();
}

bool GangWorker::is_ConcurrentGC_thread() const {
  return gang()->are_ConcurrentGC_threads();
}

void GangWorker::print_on(outputStream* st) const {
  st->print("\"%s\" ", name());
  Thread::print_on(st);
  st->cr();
}

// Printing methods

const char* AbstractWorkGang::name() const {
  return _name;
}

#ifndef PRODUCT

const char* AbstractGangTask::name() const {
  return _name;
}

#endif /* PRODUCT */

// *** WorkGangBarrierSync

WorkGangBarrierSync::WorkGangBarrierSync()
  : _monitor(Mutex::safepoint, "work gang barrier sync", true),
    _n_workers(0), _n_completed(0), _should_reset(false) {
}

WorkGangBarrierSync::WorkGangBarrierSync(int n_workers, const char* name)
  : _monitor(Mutex::safepoint, name, true),
    _n_workers(n_workers), _n_completed(0), _should_reset(false) {
}

void WorkGangBarrierSync::set_n_workers(int n_workers) {
  _n_workers = n_workers;
  _n_completed = 0;
  _should_reset = false;
}

void WorkGangBarrierSync::enter() {
  MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
  if (should_reset()) {
    // The should_reset() was set and we are the first worker to enter
    // the sync barrier.  We will zero the n_completed() count which
    // effectively resets the barrier.
    zero_completed();
    set_should_reset(false);
  }
  inc_completed();
  if (n_completed() == n_workers()) {
    // At this point we would like to reset the barrier to be ready in
    // case it is used again.  However, we cannot set n_completed() to
    // 0, even after the notify_all(), given that some other workers
    // might still be waiting for n_completed() to become ==
    // n_workers().  So, if we set n_completed() to 0, those workers
    // will get stuck (as they will wake up, see that n_completed() !=
    // n_workers() and go back to sleep).  Instead, we raise the
    // should_reset() flag and the barrier will be reset the first
    // time a worker enters it again.
    set_should_reset(true);
    monitor()->notify_all();
  } else {
    while (n_completed() != n_workers()) {
      monitor()->wait(/* no_safepoint_check */ true);
    }
  }
}
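// A minimal usage sketch for the barrier (illustrative only; barrier_sync()
// stands for however the caller reaches its WorkGangBarrierSync instance).
// Before a parallel phase the coordinating thread sets the worker count,
// and each of the n workers calls enter() when it reaches the rendezvous
// point; the last one to arrive wakes the rest, and the barrier rearms
// itself lazily via should_reset() the next time it is entered:
//
//   barrier_sync()->set_n_workers(active_workers);  // done once, up front
//   ...
//   // In each worker, at the synchronization point:
//   barrier_sync()->enter();                        // blocks until all arrive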
Instead, we raise the 386 // should_reset() flag and the barrier will be reset the first 387 // time a worker enters it again. 388 set_should_reset(true); 389 monitor()->notify_all(); 390 } else { 391 while (n_completed() != n_workers()) { 392 monitor()->wait(/* no_safepoint_check */ true); 393 } 394 } 395 } 396 397 // SubTasksDone functions. 398 399 SubTasksDone::SubTasksDone(int n) : 400 _n_tasks(n), _n_threads(1), _tasks(NULL) { 401 _tasks = NEW_C_HEAP_ARRAY(jint, n); 402 guarantee(_tasks != NULL, "alloc failure"); 403 clear(); 404 } 405 406 bool SubTasksDone::valid() { 407 return _tasks != NULL; 408 } 409 410 void SubTasksDone::set_n_threads(int t) { 411 #ifdef ASSERT 412 assert(_claimed == 0 || _threads_completed == _n_threads, 413 "should not be called while tasks are being processed!"); 414 #endif 415 _n_threads = (t == 0 ? 1 : t); 416 } 417 418 void SubTasksDone::clear() { 419 for (int i = 0; i < _n_tasks; i++) { 420 _tasks[i] = 0; 421 } 422 _threads_completed = 0; 423 #ifdef ASSERT 424 _claimed = 0; 425 #endif 426 } 427 428 bool SubTasksDone::is_task_claimed(int t) { 429 assert(0 <= t && t < _n_tasks, "bad task id."); 430 jint old = _tasks[t]; 431 if (old == 0) { 432 old = Atomic::cmpxchg(1, &_tasks[t], 0); 433 } 434 assert(_tasks[t] == 1, "What else?"); 435 bool res = old != 0; 436 #ifdef ASSERT 437 if (!res) { 438 assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?"); 439 Atomic::inc(&_claimed); 440 } 441 #endif 442 return res; 443 } 444 445 void SubTasksDone::all_tasks_completed() { 446 jint observed = _threads_completed; 447 jint old; 448 do { 449 old = observed; 450 observed = Atomic::cmpxchg(old+1, &_threads_completed, old); 451 } while (observed != old); 452 // If this was the last thread checking in, clear the tasks. 453 if (observed+1 == _n_threads) clear(); 454 } 455 456 457 SubTasksDone::~SubTasksDone() { 458 if (_tasks != NULL) FREE_C_HEAP_ARRAY(jint, _tasks); 459 } 460 461 // *** SequentialSubTasksDone 462 463 void SequentialSubTasksDone::clear() { 464 _n_tasks = _n_claimed = 0; 465 _n_threads = _n_completed = 0; 466 } 467 468 bool SequentialSubTasksDone::valid() { 469 return _n_threads > 0; 470 } 471 472 bool SequentialSubTasksDone::is_task_claimed(int& t) { 473 jint* n_claimed_ptr = &_n_claimed; 474 t = *n_claimed_ptr; 475 while (t < _n_tasks) { 476 jint res = Atomic::cmpxchg(t+1, n_claimed_ptr, t); 477 if (res == t) { 478 return false; 479 } 480 t = *n_claimed_ptr; 481 } 482 return true; 483 } 484 485 bool SequentialSubTasksDone::all_tasks_completed() { 486 jint* n_completed_ptr = &_n_completed; 487 jint complete = *n_completed_ptr; 488 while (true) { 489 jint res = Atomic::cmpxchg(complete+1, n_completed_ptr, complete); 490 if (res == complete) { 491 break; 492 } 493 complete = res; 494 } 495 if (complete+1 == _n_threads) { 496 clear(); 497 return true; 498 } 499 return false; 500 } 501 502 bool FreeIdSet::_stat_init = false; 503 FreeIdSet* FreeIdSet::_sets[NSets]; 504 bool FreeIdSet::_safepoint; 505 506 FreeIdSet::FreeIdSet(int sz, Monitor* mon) : 507 _sz(sz), _mon(mon), _hd(0), _waiters(0), _index(-1), _claimed(0) 508 { 509 _ids = new int[sz]; 510 for (int i = 0; i < sz; i++) _ids[i] = i+1; 511 _ids[sz-1] = end_of_list; // end of list. 512 if (_stat_init) { 513 for (int j = 0; j < NSets; j++) _sets[j] = NULL; 514 _stat_init = true; 515 } 516 // Add to sets. (This should happen while the system is still single-threaded.) 
bool FreeIdSet::_stat_init = false;
FreeIdSet* FreeIdSet::_sets[NSets];
bool FreeIdSet::_safepoint;

FreeIdSet::FreeIdSet(int sz, Monitor* mon) :
  _sz(sz), _mon(mon), _hd(0), _waiters(0), _index(-1), _claimed(0)
{
  _ids = new int[sz];
  for (int i = 0; i < sz; i++) _ids[i] = i+1;
  _ids[sz-1] = end_of_list; // end of list.
  if (!_stat_init) {
    for (int j = 0; j < NSets; j++) _sets[j] = NULL;
    _stat_init = true;
  }
  // Add to sets.  (This should happen while the system is still single-threaded.)
  for (int j = 0; j < NSets; j++) {
    if (_sets[j] == NULL) {
      _sets[j] = this;
      _index = j;
      break;
    }
  }
  guarantee(_index != -1, "Too many FreeIdSets in use!");
}

FreeIdSet::~FreeIdSet() {
  _sets[_index] = NULL;
}

void FreeIdSet::set_safepoint(bool b) {
  _safepoint = b;
  if (b) {
    for (int j = 0; j < NSets; j++) {
      if (_sets[j] != NULL && _sets[j]->_waiters > 0) {
        Monitor* mon = _sets[j]->_mon;
        mon->lock_without_safepoint_check();
        mon->notify_all();
        mon->unlock();
      }
    }
  }
}

#define FID_STATS 0

int FreeIdSet::claim_par_id() {
#if FID_STATS
  thread_t tslf = thr_self();
  tty->print("claim_par_id[%d]: sz = %d, claimed = %d\n", tslf, _sz, _claimed);
#endif
  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
  while (!_safepoint && _hd == end_of_list) {
    _waiters++;
#if FID_STATS
    if (_waiters > 5) {
      tty->print("claim_par_id waiting[%d]: %d waiters, %d claimed.\n",
                 tslf, _waiters, _claimed);
    }
#endif
    _mon->wait(Mutex::_no_safepoint_check_flag);
    _waiters--;
  }
  if (_hd == end_of_list) {
#if FID_STATS
    tty->print("claim_par_id[%d]: returning EOL.\n", tslf);
#endif
    return -1;
  } else {
    int res = _hd;
    _hd = _ids[res];
    _ids[res] = claimed;  // For debugging.
    _claimed++;
#if FID_STATS
    tty->print("claim_par_id[%d]: returning %d, claimed = %d.\n",
               tslf, res, _claimed);
#endif
    return res;
  }
}

bool FreeIdSet::claim_perm_id(int i) {
  assert(0 <= i && i < _sz, "Out of range.");
  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
  int prev = end_of_list;
  int cur = _hd;
  while (cur != end_of_list) {
    if (cur == i) {
      if (prev == end_of_list) {
        _hd = _ids[cur];
      } else {
        _ids[prev] = _ids[cur];
      }
      _ids[cur] = claimed;
      _claimed++;
      return true;
    } else {
      prev = cur;
      cur = _ids[cur];
    }
  }
  return false;
}

void FreeIdSet::release_par_id(int id) {
  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
  assert(_ids[id] == claimed, "Precondition.");
  _ids[id] = _hd;
  _hd = id;
  _claimed--;
#if FID_STATS
  tty->print("[%d] release_par_id(%d), waiters = %d, claimed = %d.\n",
             thr_self(), id, _waiters, _claimed);
#endif
  if (_waiters > 0)
    // Notify all would be safer, but this is OK, right?
    _mon->notify_all();
}
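// A minimal usage sketch for FreeIdSet (illustrative only; worker_id_set and
// do_work_with_id() are hypothetical).  A thread borrows an id for the
// duration of an operation and returns it when done; claim_par_id() can
// return -1 if it gives up because a safepoint has been requested:
//
//   int id = worker_id_set->claim_par_id();
//   if (id != -1) {
//     do_work_with_id(id);
//     worker_id_set->release_par_id(id);
//   }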