src/share/vm/gc/shared/workgroup.cpp

 417     // time a worker enters it again.
 418     set_should_reset(true);
 419     monitor()->notify_all();
 420   } else {
 421     while (n_completed() != n_workers() && !aborted()) {
 422       monitor()->wait(/* no_safepoint_check */ true);
 423     }
 424   }
 425   return !aborted();
 426 }
 427 
 428 void WorkGangBarrierSync::abort() {
 429   MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
 430   set_aborted();
 431   monitor()->notify_all();
 432 }
 433 
 434 // SubTasksDone functions.
 435 
 436 SubTasksDone::SubTasksDone(uint n) :
 437   _n_tasks(n), _n_threads(1), _tasks(NULL) {
 438   _tasks = NEW_C_HEAP_ARRAY(uint, n, mtInternal);
 439   guarantee(_tasks != NULL, "alloc failure");
 440   clear();
 441 }
 442 
 443 bool SubTasksDone::valid() {
 444   return _tasks != NULL;
 445 }
 446 
 447 void SubTasksDone::set_n_threads(uint t) {
 448   assert(_claimed == 0 || _threads_completed == _n_threads,
 449          "should not be called while tasks are being processed!");
 450   _n_threads = (t == 0 ? 1 : t);
 451 }
 452 
 453 void SubTasksDone::clear() {
 454   for (uint i = 0; i < _n_tasks; i++) {
 455     _tasks[i] = 0;
 456   }
 457   _threads_completed = 0;
 458 #ifdef ASSERT
 459   _claimed = 0;
 460 #endif
 461 }
 462 
 463 bool SubTasksDone::is_task_claimed(uint t) {
 464   assert(t < _n_tasks, "bad task id.");
 465   uint old = _tasks[t];
 466   if (old == 0) {
 467     old = Atomic::cmpxchg(1, &_tasks[t], 0);
 468   }
 469   assert(_tasks[t] == 1, "What else?");
 470   bool res = old != 0;
 471 #ifdef ASSERT
 472   if (!res) {
 473     assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
 474     Atomic::inc((volatile jint*) &_claimed);
 475   }
 476 #endif
 477   return res;
 478 }
 479 
 480 void SubTasksDone::all_tasks_completed() {
 481   jint observed = _threads_completed;
 482   jint old;
 483   do {
 484     old = observed;
 485     observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
 486   } while (observed != old);
 487   // If this was the last thread checking in, clear the tasks.
 488   if (observed+1 == (jint)_n_threads) clear();
 489 }
 490 
 491 
 492 SubTasksDone::~SubTasksDone() {
 493   if (_tasks != NULL) FREE_C_HEAP_ARRAY(jint, _tasks);
 494 }
 495 
 496 // *** SequentialSubTasksDone
 497 
 498 void SequentialSubTasksDone::clear() {
 499   _n_tasks   = _n_claimed   = 0;
 500   _n_threads = _n_completed = 0;
 501 }
 502 
 503 bool SequentialSubTasksDone::valid() {
 504   return _n_threads > 0;
 505 }
 506 
 507 bool SequentialSubTasksDone::is_task_claimed(uint& t) {
 508   uint* n_claimed_ptr = &_n_claimed;

 417     // time a worker enters it again.
 418     set_should_reset(true);
 419     monitor()->notify_all();
 420   } else {
 421     while (n_completed() != n_workers() && !aborted()) {
 422       monitor()->wait(/* no_safepoint_check */ true);
 423     }
 424   }
 425   return !aborted();
 426 }
 427 
 428 void WorkGangBarrierSync::abort() {
 429   MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
 430   set_aborted();
 431   monitor()->notify_all();
 432 }
 433 
 434 // SubTasksDone functions.
 435 
 436 SubTasksDone::SubTasksDone(uint n) :
 437   _n_tasks(n), _tasks(NULL) {
 438   _tasks = NEW_C_HEAP_ARRAY(uint, n, mtInternal);
 439   guarantee(_tasks != NULL, "alloc failure");
 440   clear();
 441 }
 442 
 443 bool SubTasksDone::valid() {
 444   return _tasks != NULL;
 445 }
 446 
 447 void SubTasksDone::clear() {
 448   for (uint i = 0; i < _n_tasks; i++) {
 449     _tasks[i] = 0;
 450   }
 451   _threads_completed = 0;
 452 #ifdef ASSERT
 453   _claimed = 0;
 454 #endif
 455 }
 456 
 457 bool SubTasksDone::is_task_claimed(uint t) {
 458   assert(t < _n_tasks, "bad task id.");
 459   uint old = _tasks[t];
 460   if (old == 0) {
 461     old = Atomic::cmpxchg(1, &_tasks[t], 0);
 462   }
 463   assert(_tasks[t] == 1, "What else?");
 464   bool res = old != 0;
 465 #ifdef ASSERT
 466   if (!res) {
 467     assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
 468     Atomic::inc((volatile jint*) &_claimed);
 469   }
 470 #endif
 471   return res;
 472 }
 473 
 474 void SubTasksDone::all_tasks_completed(uint n_threads) {
 475   jint observed = _threads_completed;
 476   jint old;
 477   do {
 478     old = observed;
 479     observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
 480   } while (observed != old);
 481   // If this was the last thread checking in, clear the tasks.
 482   uint adjusted_thread_count = (n_threads == 0 ? 1 : n_threads);
 483   if (observed + 1 == (jint)adjusted_thread_count) {
 484     clear();
 485   }
 486 }
 487 
 488 
 489 SubTasksDone::~SubTasksDone() {
 490   if (_tasks != NULL) FREE_C_HEAP_ARRAY(jint, _tasks);
 491 }
 492 
 493 // *** SequentialSubTasksDone
 494 
 495 void SequentialSubTasksDone::clear() {
 496   _n_tasks   = _n_claimed   = 0;
 497   _n_threads = _n_completed = 0;
 498 }
 499 
 500 bool SequentialSubTasksDone::valid() {
 501   return _n_threads > 0;
 502 }
 503 
 504 bool SequentialSubTasksDone::is_task_claimed(uint& t) {
 505   uint* n_claimed_ptr = &_n_claimed;
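
The difference between the two copies of the file above is the SubTasksDone check-in protocol: the first copy (the old version) keeps a _n_threads field set through set_n_threads() and has a parameterless all_tasks_completed(), while the second copy (the new version) drops the field and has each worker pass the participating thread count to all_tasks_completed(uint n_threads), which treats a count of 0 as 1 and lets the last thread to check in call clear(). What follows is a minimal standalone sketch of that claim/check-in pattern, not the HotSpot code itself: MiniSubTasksDone, worker() and main() are hypothetical names, and std::atomic stands in for HotSpot's Atomic::cmpxchg and Atomic::inc.

// Standalone sketch (hypothetical, not HotSpot code) of the claim/check-in
// pattern shown in the listing above.
#include <atomic>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

class MiniSubTasksDone {
  std::vector<std::atomic<unsigned> > _tasks;   // 0 = unclaimed, 1 = claimed
  std::atomic<unsigned> _threads_completed;

public:
  explicit MiniSubTasksDone(unsigned n_tasks)
      : _tasks(n_tasks), _threads_completed(0) {
    for (auto& task : _tasks) {
      task.store(0u);                           // mark every task unclaimed
    }
  }

  // Returns true if task t had already been claimed by some thread,
  // false if the calling thread just claimed it.
  bool is_task_claimed(unsigned t) {
    unsigned expected = 0u;
    return !_tasks[t].compare_exchange_strong(expected, 1u);
  }

  // As in the new version of the file, the caller supplies the number of
  // participating threads instead of the object caching it via set_n_threads().
  void all_tasks_completed(unsigned n_threads) {
    unsigned adjusted_thread_count = (n_threads == 0 ? 1 : n_threads);
    // The last thread to check in resets the claim state for the next cycle.
    if (_threads_completed.fetch_add(1u) + 1u == adjusted_thread_count) {
      for (auto& task : _tasks) {
        task.store(0u);
      }
      _threads_completed.store(0u);
    }
  }
};

static void worker(unsigned id, MiniSubTasksDone& subtasks,
                   unsigned n_tasks, unsigned n_threads) {
  for (unsigned t = 0; t < n_tasks; t++) {
    if (!subtasks.is_task_claimed(t)) {
      std::printf("worker %u does task %u\n", id, t);
    }
  }
  subtasks.all_tasks_completed(n_threads);      // every worker checks in exactly once
}

int main() {
  const unsigned n_tasks = 8;
  const unsigned n_threads = 4;
  MiniSubTasksDone subtasks(n_tasks);

  std::vector<std::thread> threads;
  for (unsigned i = 0; i < n_threads; i++) {
    threads.emplace_back(worker, i, std::ref(subtasks), n_tasks, n_threads);
  }
  for (auto& th : threads) {
    th.join();
  }
  return 0;
}

A sketch like this should build with any C++11 compiler (for example g++ -std=c++11 -pthread); in HotSpot the corresponding callers are the parallel worker tasks that claim root-processing subtasks through SubTasksDone.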

