
src/share/vm/utilities/workgroup.cpp

rev 7793 : 8073315: Enable gcc -Wtype-limits and fix upcoming issues.
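For context: gcc's -Wtype-limits warns when a comparison is always true or always false because of the limited range of a type, the classic case being a ">= 0" test on an unsigned value. The two asserts changed below, in AbstractWorkGang::gang_worker and SubTasksDone::is_task_claimed, drop exactly that kind of redundant lower-bound check on a uint. A minimal standalone sketch of the pattern (check_index and limit are made-up names, not part of this patch); compiling it with g++ -c -Wtype-limits should flag the first assert:

    #include <cassert>

    typedef unsigned int uint;

    void check_index(uint i, uint limit) {
      // gcc -Wtype-limits reports "comparison of unsigned expression >= 0
      // is always true" here, because an unsigned value cannot be negative.
      assert(i >= 0 && i < limit);   // old style: redundant lower bound
      // Equivalent check after the fix; only the upper bound is meaningful.
      assert(i < limit);
    }

Removing the dead comparison changes nothing at runtime; it only silences the warning so that -Wtype-limits can be enabled for the whole build.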
Old version (before the change):
 107   }
 108   return true;
 109 }
 110 
 111 AbstractWorkGang::~AbstractWorkGang() {
 112   if (TraceWorkGang) {
 113     tty->print_cr("Destructing work gang %s", name());
 114   }
 115   stop();   // stop all the workers
 116   for (uint worker = 0; worker < total_workers(); worker += 1) {
 117     delete gang_worker(worker);
 118   }
 119   delete gang_workers();
 120   delete monitor();
 121 }
 122 
 123 GangWorker* AbstractWorkGang::gang_worker(uint i) const {
 124   // Array index bounds checking.
 125   GangWorker* result = NULL;
 126   assert(gang_workers() != NULL, "No workers for indexing");
 127   assert(((i >= 0) && (i < total_workers())), "Worker index out of bounds");
 128   result = _gang_workers[i];
 129   assert(result != NULL, "Indexing to null worker");
 130   return result;
 131 }
 132 
 133 void WorkGang::run_task(AbstractGangTask* task) {
 134   run_task(task, total_workers());
 135 }
 136 
 137 void WorkGang::run_task(AbstractGangTask* task, uint no_of_parallel_workers) {
 138   task->set_for_termination(no_of_parallel_workers);
 139 
 140   // This thread is executed by the VM thread which does not block
 141   // on ordinary MutexLocker's.
 142   MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
 143   if (TraceWorkGang) {
 144     tty->print_cr("Running work gang %s task %s", name(), task->name());
 145   }
 146   // Tell all the workers to run a task.
 147   assert(task != NULL, "Running a null task");


 445   return _tasks != NULL;
 446 }
 447 
 448 void SubTasksDone::set_n_threads(uint t) {
 449   assert(_claimed == 0 || _threads_completed == _n_threads,
 450          "should not be called while tasks are being processed!");
 451   _n_threads = (t == 0 ? 1 : t);
 452 }
 453 
 454 void SubTasksDone::clear() {
 455   for (uint i = 0; i < _n_tasks; i++) {
 456     _tasks[i] = 0;
 457   }
 458   _threads_completed = 0;
 459 #ifdef ASSERT
 460   _claimed = 0;
 461 #endif
 462 }
 463 
 464 bool SubTasksDone::is_task_claimed(uint t) {
 465   assert(0 <= t && t < _n_tasks, "bad task id.");
 466   uint old = _tasks[t];
 467   if (old == 0) {
 468     old = Atomic::cmpxchg(1, &_tasks[t], 0);
 469   }
 470   assert(_tasks[t] == 1, "What else?");
 471   bool res = old != 0;
 472 #ifdef ASSERT
 473   if (!res) {
 474     assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
 475     Atomic::inc((volatile jint*) &_claimed);
 476   }
 477 #endif
 478   return res;
 479 }
 480 
 481 void SubTasksDone::all_tasks_completed() {
 482   jint observed = _threads_completed;
 483   jint old;
 484   do {
 485     old = observed;

New version (after the change):
 107   }
 108   return true;
 109 }
 110 
 111 AbstractWorkGang::~AbstractWorkGang() {
 112   if (TraceWorkGang) {
 113     tty->print_cr("Destructing work gang %s", name());
 114   }
 115   stop();   // stop all the workers
 116   for (uint worker = 0; worker < total_workers(); worker += 1) {
 117     delete gang_worker(worker);
 118   }
 119   delete gang_workers();
 120   delete monitor();
 121 }
 122 
 123 GangWorker* AbstractWorkGang::gang_worker(uint i) const {
 124   // Array index bounds checking.
 125   GangWorker* result = NULL;
 126   assert(gang_workers() != NULL, "No workers for indexing");
 127   assert(i < total_workers(), "Worker index out of bounds");
 128   result = _gang_workers[i];
 129   assert(result != NULL, "Indexing to null worker");
 130   return result;
 131 }
 132 
 133 void WorkGang::run_task(AbstractGangTask* task) {
 134   run_task(task, total_workers());
 135 }
 136 
 137 void WorkGang::run_task(AbstractGangTask* task, uint no_of_parallel_workers) {
 138   task->set_for_termination(no_of_parallel_workers);
 139 
 140   // This thread is executed by the VM thread which does not block
 141   // on ordinary MutexLocker's.
 142   MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
 143   if (TraceWorkGang) {
 144     tty->print_cr("Running work gang %s task %s", name(), task->name());
 145   }
 146   // Tell all the workers to run a task.
 147   assert(task != NULL, "Running a null task");


 445   return _tasks != NULL;
 446 }
 447 
 448 void SubTasksDone::set_n_threads(uint t) {
 449   assert(_claimed == 0 || _threads_completed == _n_threads,
 450          "should not be called while tasks are being processed!");
 451   _n_threads = (t == 0 ? 1 : t);
 452 }
 453 
 454 void SubTasksDone::clear() {
 455   for (uint i = 0; i < _n_tasks; i++) {
 456     _tasks[i] = 0;
 457   }
 458   _threads_completed = 0;
 459 #ifdef ASSERT
 460   _claimed = 0;
 461 #endif
 462 }
 463 
 464 bool SubTasksDone::is_task_claimed(uint t) {
 465   assert(t < _n_tasks, "bad task id.");
 466   uint old = _tasks[t];
 467   if (old == 0) {
 468     old = Atomic::cmpxchg(1, &_tasks[t], 0);
 469   }
 470   assert(_tasks[t] == 1, "What else?");
 471   bool res = old != 0;
 472 #ifdef ASSERT
 473   if (!res) {
 474     assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
 475     Atomic::inc((volatile jint*) &_claimed);
 476   }
 477 #endif
 478   return res;
 479 }
 480 
 481 void SubTasksDone::all_tasks_completed() {
 482   jint observed = _threads_completed;
 483   jint old;
 484   do {
 485     old = observed;


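The SubTasksDone hunk above also shows how subtasks are handed out without a lock: a slot value of 0 means unclaimed, and Atomic::cmpxchg swings it to 1 for whichever thread gets there first. A rough standalone sketch of the same claim-or-skip idea using std::atomic (hypothetical names and a fixed task count; an illustration only, not the HotSpot implementation):

    #include <atomic>
    #include <cstddef>

    static const std::size_t kNumTasks = 8;          // made-up task count
    static std::atomic<unsigned> g_tasks[kNumTasks]; // zero means unclaimed

    // Returns true if some thread had already claimed task 't', false if the
    // caller just claimed it (same sense as SubTasksDone::is_task_claimed).
    bool is_task_claimed(std::size_t t) {
      unsigned expected = 0;
      // If the slot is still 0, atomically set it to 1 and report success;
      // otherwise leave it untouched and report failure.
      bool claimed_now = g_tasks[t].compare_exchange_strong(expected, 1u);
      return !claimed_now;
    }

Because each claim is a single compare-and-swap on its own slot, losing threads simply move on to the next subtask and no mutex is needed.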