src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp
Sdiff for bug 8047290

Old:

 383   _idle_workers(0),
 384   _ndc(NULL) {
 385   initialize();
 386 }
 387 
 388 GCTaskManager::GCTaskManager(uint workers, NotifyDoneClosure* ndc) :
 389   _workers(workers),
 390   _active_workers(0),
 391   _idle_workers(0),
 392   _ndc(ndc) {
 393   initialize();
 394 }
 395 
 396 void GCTaskManager::initialize() {
 397   if (TraceGCTaskManager) {
 398     tty->print_cr("GCTaskManager::initialize: workers: %u", workers());
 399   }
 400   assert(workers() != 0, "no workers");
 401   _monitor = new Monitor(Mutex::barrier,                // rank
 402                          "GCTaskManager monitor",       // name
 403                          Mutex::_allow_vm_block_flag);  // allow_vm_block
 404   // The queue for the GCTaskManager must be a CHeapObj.
 405   GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();
 406   _queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
 407   _noop_task = NoopGCTask::create_on_c_heap();
 408   _idle_inactive_task = WaitForBarrierGCTask::create_on_c_heap();
 409   _resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC);
 410   {
 411     // Set up worker threads.
 412     //     Distribute the workers among the available processors,
 413     //     unless we were told not to, or if the os doesn't want to.
 414     uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
 415     if (!BindGCTaskThreadsToCPUs ||
 416         !os::distribute_processes(workers(), processor_assignment)) {
 417       for (uint a = 0; a < workers(); a += 1) {
 418         processor_assignment[a] = sentinel_worker();
 419       }
 420     }
 421     _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
 422     for (uint t = 0; t < workers(); t += 1) {
 423       set_thread(t, GCTaskThread::create(this, t, processor_assignment[t]));
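
The setup code above tries to spread the worker threads across the available processors and falls back to a "no binding" sentinel when BindGCTaskThreadsToCPUs is off or os::distribute_processes() declines. A minimal standalone sketch of that fallback pattern follows; the names distribute_processes, SENTINEL_WORKER, and the flag variable are stand-ins for the HotSpot internals, not the real API:

  #include <cstdio>
  #include <vector>

  // Hypothetical stand-ins for the HotSpot pieces used above.
  static const unsigned SENTINEL_WORKER = ~0u;   // "not bound to any CPU"
  static bool bind_gc_threads_to_cpus = false;   // models BindGCTaskThreadsToCPUs

  // Models os::distribute_processes(): fill 'assignment' with CPU ids,
  // or return false if the OS cannot honor the request.
  static bool distribute_processes(unsigned workers, unsigned* assignment) {
    for (unsigned i = 0; i < workers; i++) {
      assignment[i] = i % 4;  // pretend the machine has 4 processors
    }
    return true;
  }

  int main() {
    const unsigned workers = 6;
    std::vector<unsigned> assignment(workers);
    // Same shape as GCTaskManager::initialize(): try to distribute the
    // workers, otherwise mark every slot with the sentinel.
    if (!bind_gc_threads_to_cpus ||
        !distribute_processes(workers, assignment.data())) {
      for (unsigned a = 0; a < workers; a++) {
        assignment[a] = SENTINEL_WORKER;
      }
    }
    for (unsigned t = 0; t < workers; t++) {
      printf("worker %u -> %s\n", t,
             assignment[t] == SENTINEL_WORKER ? "unbound" : "bound");
    }
    return 0;
  }

The sentinel simply tells each worker thread, via the third argument to GCTaskThread::create(), that it should not be pinned to a particular processor.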


1108   Monitor* result = NULL;
1109   // Lazy initialization: possible race.
1110   if (lock() == NULL) {
1111     _lock = new Mutex(Mutex::barrier,                  // rank
1112                       "MonitorSupply mutex",           // name
1113                       Mutex::_allow_vm_block_flag);    // allow_vm_block
1114   }
1115   {
1116     MutexLockerEx ml(lock());
1117     // Lazy initialization.
1118     if (freelist() == NULL) {
1119       _freelist =
1120         new(ResourceObj::C_HEAP, mtGC) GrowableArray<Monitor*>(ParallelGCThreads,
1121                                                          true);
1122     }
1123     if (! freelist()->is_empty()) {
1124       result = freelist()->pop();
1125     } else {
1126       result = new Monitor(Mutex::barrier,                  // rank
1127                            "MonitorSupply monitor",         // name
1128                            Mutex::_allow_vm_block_flag);    // allow_vm_block
1129     }
1130     guarantee(result != NULL, "shouldn't return NULL");
1131     assert(!result->is_locked(), "shouldn't be locked");
1132     // release lock().
1133   }
1134   return result;
1135 }
1136 
1137 void MonitorSupply::release(Monitor* instance) {
1138   assert(instance != NULL, "shouldn't release NULL");
1139   assert(!instance->is_locked(), "shouldn't be locked");
1140   {
1141     MutexLockerEx ml(lock());
1142     freelist()->push(instance);
1143     // release lock().
1144   }
1145 }
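
MonitorSupply is a free-list pool: reserve() pops a cached Monitor or allocates a fresh one, and release() pushes it back, so monitors are recycled rather than constructed on every request. Below is a minimal sketch of the same pattern, using std::mutex and std::vector as stand-ins for the HotSpot Mutex and GrowableArray, and constructing the lock eagerly, which sidesteps the "lazy initialization: possible race" the original comments call out:

  #include <mutex>
  #include <vector>

  // Stand-in for the pooled resource; the real code pools HotSpot Monitors.
  struct PooledMonitor {};

  class MonitorPool {
   public:
    // Pop a cached instance, or allocate a new one if the list is empty.
    PooledMonitor* reserve() {
      std::lock_guard<std::mutex> guard(_lock);
      if (!_freelist.empty()) {
        PooledMonitor* m = _freelist.back();
        _freelist.pop_back();
        return m;
      }
      return new PooledMonitor();
    }

    // Return an instance to the free list for later reuse.
    void release(PooledMonitor* m) {
      std::lock_guard<std::mutex> guard(_lock);
      _freelist.push_back(m);
    }

   private:
    std::mutex _lock;                       // eagerly constructed: no init race
    std::vector<PooledMonitor*> _freelist;  // recycled instances
  };

The free list matters because a Monitor wraps OS-level synchronization state; handing back a recycled instance avoids repeated allocation on every reserve() call.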

New:

 383   _idle_workers(0),
 384   _ndc(NULL) {
 385   initialize();
 386 }
 387 
 388 GCTaskManager::GCTaskManager(uint workers, NotifyDoneClosure* ndc) :
 389   _workers(workers),
 390   _active_workers(0),
 391   _idle_workers(0),
 392   _ndc(ndc) {
 393   initialize();
 394 }
 395 
 396 void GCTaskManager::initialize() {
 397   if (TraceGCTaskManager) {
 398     tty->print_cr("GCTaskManager::initialize: workers: %u", workers());
 399   }
 400   assert(workers() != 0, "no workers");
 401   _monitor = new Monitor(Mutex::barrier,                // rank
 402                          "GCTaskManager monitor",       // name
 403                          Mutex::_allow_vm_block_flag,   // allow_vm_block
 404                          Monitor::_safepoint_check_never);
 405   // The queue for the GCTaskManager must be a CHeapObj.
 406   GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();
 407   _queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
 408   _noop_task = NoopGCTask::create_on_c_heap();
 409   _idle_inactive_task = WaitForBarrierGCTask::create_on_c_heap();
 410   _resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC);
 411   {
 412     // Set up worker threads.
 413     //     Distribute the workers among the available processors,
 414     //     unless we were told not to, or if the os doesn't want to.
 415     uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
 416     if (!BindGCTaskThreadsToCPUs ||
 417         !os::distribute_processes(workers(), processor_assignment)) {
 418       for (uint a = 0; a < workers(); a += 1) {
 419         processor_assignment[a] = sentinel_worker();
 420       }
 421     }
 422     _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
 423     for (uint t = 0; t < workers(); t += 1) {
 424       set_thread(t, GCTaskThread::create(this, t, processor_assignment[t]));


1109   Monitor* result = NULL;
1110   // Lazy initialization: possible race.
1111   if (lock() == NULL) {
1112     _lock = new Mutex(Mutex::barrier,                  // rank
1113                       "MonitorSupply mutex",           // name
1114                       Mutex::_allow_vm_block_flag);    // allow_vm_block
1115   }
1116   {
1117     MutexLockerEx ml(lock());
1118     // Lazy initialization.
1119     if (freelist() == NULL) {
1120       _freelist =
1121         new(ResourceObj::C_HEAP, mtGC) GrowableArray<Monitor*>(ParallelGCThreads,
1122                                                          true);
1123     }
1124     if (! freelist()->is_empty()) {
1125       result = freelist()->pop();
1126     } else {
1127       result = new Monitor(Mutex::barrier,                  // rank
1128                            "MonitorSupply monitor",         // name
1129                            Mutex::_allow_vm_block_flag,     // allow_vm_block
1130                            Monitor::_safepoint_check_never);
1131     }
1132     guarantee(result != NULL, "shouldn't return NULL");
1133     assert(!result->is_locked(), "shouldn't be locked");
1134     // release lock().
1135   }
1136   return result;
1137 }
1138 
1139 void MonitorSupply::release(Monitor* instance) {
1140   assert(instance != NULL, "shouldn't release NULL");
1141   assert(!instance->is_locked(), "shouldn't be locked");
1142   {
1143     MutexLockerEx ml(lock());
1144     freelist()->push(instance);
1145     // release lock().
1146   }
1147 }
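
Taken together, the change to this file is mechanical: the two Monitor constructor calls (the GCTaskManager monitor and the MonitorSupply monitor) gain an explicit Monitor::_safepoint_check_never argument, while the lazily created MonitorSupply mutex keeps its three-argument form. Condensed from the hunks above:

  // Old: the safepoint-check behavior of the lock is left implicit.
  _monitor = new Monitor(Mutex::barrier,                // rank
                         "GCTaskManager monitor",       // name
                         Mutex::_allow_vm_block_flag);  // allow_vm_block

  // New: the policy is stated explicitly at the construction site.
  _monitor = new Monitor(Mutex::barrier,                // rank
                         "GCTaskManager monitor",       // name
                         Mutex::_allow_vm_block_flag,   // allow_vm_block
                         Monitor::_safepoint_check_never);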