src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

 208   // The caller may block while communicating
 209   // with the SLT thread in order to acquire/release the PLL.
 210   SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
 211   if (slt != NULL) {
 212     slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
 213   } else {
 214     SurrogateLockerThread::report_missing_slt();
 215   }
 216 }
 217 
 218 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
 219   assert(_needs_pll, "don't call this otherwise");
 220   // The caller may block while communicating
 221   // with the SLT thread in order to acquire/release the PLL.
 222   ConcurrentMarkThread::slt()->
 223     manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
 224 }
 225 
 226 void VM_CGC_Operation::doit() {
 227   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 228   GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
 229   SharedHeap* sh = SharedHeap::heap();
 230   // This could go away if CollectedHeap gave access to _gc_is_active...
 231   if (sh != NULL) {
 232     IsGCActiveMark x;
 233     _cl->do_void();
 234   } else {
 235     _cl->do_void();
 236   }
 237 }
 238 
 239 bool VM_CGC_Operation::doit_prologue() {
 240   // Note the relative order of the locks must match that in
 241   // VM_GC_Operation::doit_prologue() or deadlocks can occur
 242   if (_needs_pll) {
 243     acquire_pending_list_lock();
 244   }
 245 
 246   Heap_lock->lock();
 247   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
 248   return true;
 249 }
 250 
 251 void VM_CGC_Operation::doit_epilogue() {
 252   // Note the relative order of the unlocks must match that in
 253   // VM_GC_Operation::doit_epilogue()
 254   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
 255   Heap_lock->unlock();
 256   if (_needs_pll) {
 257     release_and_notify_pending_list_lock();
 258   }
 259 }


 208   // The caller may block while communicating
 209   // with the SLT thread in order to acquire/release the PLL.
 210   SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
 211   if (slt != NULL) {
 212     slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
 213   } else {
 214     SurrogateLockerThread::report_missing_slt();
 215   }
 216 }
 217 
 218 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
 219   assert(_needs_pll, "don't call this otherwise");
 220   // The caller may block while communicating
 221   // with the SLT thread in order to acquire/release the PLL.
 222   ConcurrentMarkThread::slt()->
 223     manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
 224 }
 225 
 226 void VM_CGC_Operation::doit() {
 227   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 228   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 229   GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());


 230   IsGCActiveMark x;
 231   _cl->do_void();



 232 }
 233 
 234 bool VM_CGC_Operation::doit_prologue() {
 235   // Note the relative order of the locks must match that in
 236   // VM_GC_Operation::doit_prologue() or deadlocks can occur
 237   if (_needs_pll) {
 238     acquire_pending_list_lock();
 239   }
 240 
 241   Heap_lock->lock();

 242   return true;
 243 }
 244 
 245 void VM_CGC_Operation::doit_epilogue() {
 246   // Note the relative order of the unlocks must match that in
 247   // VM_GC_Operation::doit_epilogue()

 248   Heap_lock->unlock();
 249   if (_needs_pll) {
 250     release_and_notify_pending_list_lock();
 251   }
 252 }