< prev index next >

src/hotspot/share/gc/shared/workgroup.cpp

Print this page




 162     uint num_started = Atomic::add(&_started, 1u);
 163 
 164     // Subtract one to get a zero-indexed worker id.
 165     uint worker_id = num_started - 1;
 166 
 167     return WorkData(_task, worker_id);
 168   }
 169 
       // Worker side: atomically retires this worker from the current task.
       // The worker that performs the final decrement (bringing _not_finished
       // to zero) signals _end_semaphore to wake the waiting coordinator.
 170   void worker_done_with_task() {
 171     // Mark that the worker is done with the task.
 172     // The worker is not allowed to read the state variables after this line.
 173     uint not_finished = Atomic::sub(&_not_finished, 1u);
 174 
 175     // The last worker signals to the coordinator that all work is completed.
 176     if (not_finished == 0) {
 177       _end_semaphore->signal();
 178     }
 179   }
 180 };
 181 
     // Monitor (mutex + condition variable) based implementation of
     // GangTaskDispatcher.  The coordinator publishes a task and a worker
     // count under _monitor and notifies the workers; workers claim
     // zero-indexed worker ids in arrival order and report completion under
     // the same monitor.  Chosen when UseSemaphoreGCThreadsSynchronization
     // is disabled (see create_dispatcher()).
 182 class MutexGangTaskDispatcher : public GangTaskDispatcher {
       // The task currently being dispatched, or NULL when idle.
 183   AbstractGangTask* _task;
 184 
       // Number of workers that have claimed a worker id for the current task.
 185   volatile uint _started;
       // Number of workers that have finished the current task.
 186   volatile uint _finished;
       // Number of workers requested for the current task; 0 means "no task".
 187   volatile uint _num_workers;
 188 
       // Guards all state above and is used for coordinator/worker signaling.
 189   Monitor* _monitor;
 190 
 191  public:
 192   MutexGangTaskDispatcher() :
 193     _task(NULL),
 194     _started(0),
 195     _finished(0),
 196     _num_workers(0),
 197     _monitor(new Monitor(Monitor::leaf, "WorkGang dispatcher lock", false, Monitor::_safepoint_check_never)) {
 198   }
 199 
 200   ~MutexGangTaskDispatcher() {
 201     delete _monitor;
 202   }
 203 
       // Coordinator side: publish the task, wake the workers, then block
       // until all num_workers workers have reported completion.  The monitor
       // is held for the whole dispatch; workers make progress while the
       // coordinator sits in ml.wait() below (wait releases the lock).
 204   void coordinator_execute_on_workers(AbstractGangTask* task, uint num_workers, bool add_foreground_work) {
 205     MonitorLocker ml(_monitor, Mutex::_no_safepoint_check_flag);
 206 
 207     _task        = task;
 208     _num_workers = num_workers;
 209 
 210     // Tell the workers to get to work.
 211     _monitor->notify_all();
 212 
       // NOTE(review): declared elsewhere; presumably lets the coordinator
       // contribute work when add_foreground_work is set.  It is called with
       // _monitor still held, so workers cannot acquire the lock and claim
       // work until the coordinator reaches ml.wait() -- confirm intended.
 213     run_foreground_task_if_needed(task, num_workers, add_foreground_work);
 214 
 215     // Wait for them to finish.
 216     while (_finished < _num_workers) {
 217       ml.wait();
 218     }
 219 
       // Reset the dispatcher state for the next task.
 220     _task        = NULL;
 221     _num_workers = 0;
 222     _started     = 0;
 223     _finished    = 0;
 224   }
 225 
       // Worker side: block until a task is published and unclaimed worker
       // slots remain, then claim the next zero-indexed worker id.
 226   WorkData worker_wait_for_task() {
 227     MonitorLocker ml(_monitor, Mutex::_no_safepoint_check_flag);
 228 
       // _num_workers == 0 means no task is published; _started == _num_workers
       // means the current task's worker slots are all taken.
 229     while (_num_workers == 0 || _started == _num_workers) {
 230       _monitor->wait();
 231     }
 232 
 233     _started++;
 234 
 235     // Subtract one to get a zero-indexed worker id.
 236     uint worker_id = _started - 1;
 237 
 238     return WorkData(_task, worker_id);
 239   }
 240 
       // Worker side: report completion; the last worker to finish notifies
       // the coordinator (and, as noted below, every other waiter too).
 241   void worker_done_with_task() {
 242     MonitorLocker ml(_monitor, Mutex::_no_safepoint_check_flag);
 243 
 244     _finished++;
 245 
 246     if (_finished == _num_workers) {
 247       // This will wake up all workers and not only the coordinator.
 248       _monitor->notify_all();
 249     }
 250   }
 251 };
 252 
 253 static GangTaskDispatcher* create_dispatcher() {
 254   if (UseSemaphoreGCThreadsSynchronization) {
 255     return new SemaphoreGangTaskDispatcher();
 256   }
 257 
 258   return new MutexGangTaskDispatcher();
 259 }
 260 
     // Construct a gang of `workers` worker threads.  The choice between the
     // semaphore- and monitor-based dispatcher is delegated to
     // create_dispatcher(); the gang owns the returned dispatcher.
 261 WorkGang::WorkGang(const char* name,
 262                    uint  workers,
 263                    bool  are_GC_task_threads,
 264                    bool  are_ConcurrentGC_threads) :
 265     AbstractWorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads),
 266     _dispatcher(create_dispatcher())
 267 { }
 268 
     // The gang owns its dispatcher (allocated in the constructor).
 269 WorkGang::~WorkGang() {
 270   delete _dispatcher;
 271 }
 272 
     // Factory hook: create the worker thread object for the given worker id.
 273 AbstractGangWorker* WorkGang::allocate_worker(uint worker_id) {
 274   return new GangWorker(this, worker_id);
 275 }
 276 
     // Convenience overload: run `task` with the currently active worker count.
 277 void WorkGang::run_task(AbstractGangTask* task) {
 278   run_task(task, active_workers());
 279 }
 280 
 281 void WorkGang::run_task(AbstractGangTask* task, uint num_workers, bool add_foreground_work) {
 282   guarantee(num_workers <= total_workers(),
 283             "Trying to execute task %s with %u workers which is more than the amount of total workers %u.",
 284             task->name(), num_workers, total_workers());
 285   guarantee(num_workers > 0, "Trying to execute task %s with zero workers", task->name());
 286   uint old_num_workers = _active_workers;




 162     uint num_started = Atomic::add(&_started, 1u);
 163 
 164     // Subtract one to get a zero-indexed worker id.
 165     uint worker_id = num_started - 1;
 166 
 167     return WorkData(_task, worker_id);
 168   }
 169 
       // Worker side: atomically retires this worker from the current task.
       // The worker that performs the final decrement (bringing _not_finished
       // to zero) signals _end_semaphore to wake the waiting coordinator.
 170   void worker_done_with_task() {
 171     // Mark that the worker is done with the task.
 172     // The worker is not allowed to read the state variables after this line.
 173     uint not_finished = Atomic::sub(&_not_finished, 1u);
 174 
 175     // The last worker signals to the coordinator that all work is completed.
 176     if (not_finished == 0) {
 177       _end_semaphore->signal();
 178     }
 179   }
 180 };
 181 















































































     // Construct a gang of `workers` worker threads.  This version always
     // uses the semaphore-based dispatcher; the gang owns it and deletes it
     // in the destructor.
 182 WorkGang::WorkGang(const char* name,
 183                    uint  workers,
 184                    bool  are_GC_task_threads,
 185                    bool  are_ConcurrentGC_threads) :
 186     AbstractWorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads),
 187     _dispatcher(new SemaphoreGangTaskDispatcher())
 188 { }
 189 
     // The gang owns its dispatcher (allocated in the constructor).
 190 WorkGang::~WorkGang() {
 191   delete _dispatcher;
 192 }
 193 
     // Factory hook: create the worker thread object for the given worker id.
 194 AbstractGangWorker* WorkGang::allocate_worker(uint worker_id) {
 195   return new GangWorker(this, worker_id);
 196 }
 197 
     // Convenience overload: run `task` with the currently active worker count.
 198 void WorkGang::run_task(AbstractGangTask* task) {
 199   run_task(task, active_workers());
 200 }
 201 
 202 void WorkGang::run_task(AbstractGangTask* task, uint num_workers, bool add_foreground_work) {
 203   guarantee(num_workers <= total_workers(),
 204             "Trying to execute task %s with %u workers which is more than the amount of total workers %u.",
 205             task->name(), num_workers, total_workers());
 206   guarantee(num_workers > 0, "Trying to execute task %s with zero workers", task->name());
 207   uint old_num_workers = _active_workers;


< prev index next >