
src/share/vm/gc/g1/vm_operations_g1.cpp

Old version:

 187     // If the condition has already been reached, there's no point in
 188     // actually taking the lock and doing the wait.
 189     if (g1h->old_marking_cycles_completed() <=
 190                                           _old_marking_cycles_completed_before) {
 191       // The following is largely copied from CMS
 192 
 193       Thread* thr = Thread::current();
 194       assert(thr->is_Java_thread(), "invariant");
 195       JavaThread* jt = (JavaThread*)thr;
 196       ThreadToNativeFromVM native(jt);
 197 
 198       MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 199       while (g1h->old_marking_cycles_completed() <=
 200                                           _old_marking_cycles_completed_before) {
 201         FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
 202       }
 203     }
 204   }
 205 }
 206 
 207 void VM_CGC_Operation::acquire_pending_list_lock() {
 208   _pending_list_locker.lock();
 209 }
 210 
 211 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
 212   _pending_list_locker.unlock();
 213 }
 214 
 215 void VM_CGC_Operation::doit() {
 216   GCIdMark gc_id_mark(_gc_id);
 217   GCTraceCPUTime tcpu;
 218   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 219   GCTraceTime(Info, gc) t(_printGCMessage, g1h->concurrent_mark()->gc_timer_cm(), GCCause::_no_gc, true);
 220   IsGCActiveMark x;
 221   _cl->do_void();
 222 }
 223 
 224 bool VM_CGC_Operation::doit_prologue() {
 225   // Note the relative order of the locks must match that in
 226   // VM_GC_Operation::doit_prologue() or deadlocks can occur
 227   if (_needs_pending_list_lock) {
 228     acquire_pending_list_lock();
 229   }
 230   Heap_lock->lock();
 231   return true;
 232 }
 233 
 234 void VM_CGC_Operation::doit_epilogue() {
 235   // Note the relative order of the unlocks must match that in
 236   // VM_GC_Operation::doit_epilogue()
 237   Heap_lock->unlock();
 238   if (_needs_pending_list_lock) {
 239     release_and_notify_pending_list_lock();
 240   }
 241 }
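The prologue and epilogue above note that the locks must be taken and released in the same relative order as in VM_GC_Operation::doit_prologue()/doit_epilogue(), or deadlocks can occur. A minimal standalone sketch of that hazard, using std::mutex as an illustrative stand-in for the HotSpot locks (the function names below are hypothetical, not anything in this patch):

    #include <mutex>

    std::mutex pending_list_lock;  // stands in for the java.lang.ref pending list lock
    std::mutex heap_lock;          // stands in for Heap_lock

    // Consistent order, as the prologue above uses: pending list lock first,
    // then Heap_lock; the epilogue releases them in the reverse order.
    void operation_consistent_order() {
      pending_list_lock.lock();
      heap_lock.lock();
      // ... VM operation body ...
      heap_lock.unlock();
      pending_list_lock.unlock();
    }

    // If some other path took the same two locks in the opposite order, the two
    // threads could each end up holding one lock while blocking on the other.
    void operation_inverted_order_can_deadlock() {
      heap_lock.lock();
      pending_list_lock.lock();   // potential deadlock against the function above
      pending_list_lock.unlock();
      heap_lock.unlock();
    }

Keeping every acquirer on one global order is what the "relative order of the locks must match" comments are guarding.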
New version:

 187     // If the condition has already been reached, there's no point in
 188     // actually taking the lock and doing the wait.
 189     if (g1h->old_marking_cycles_completed() <=
 190                                           _old_marking_cycles_completed_before) {
 191       // The following is largely copied from CMS
 192 
 193       Thread* thr = Thread::current();
 194       assert(thr->is_Java_thread(), "invariant");
 195       JavaThread* jt = (JavaThread*)thr;
 196       ThreadToNativeFromVM native(jt);
 197 
 198       MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 199       while (g1h->old_marking_cycles_completed() <=
 200                                           _old_marking_cycles_completed_before) {
 201         FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
 202       }
 203     }
 204   }
 205 }
 206 
 207 void VM_CGC_Operation::doit() {
 208   GCIdMark gc_id_mark(_gc_id);
 209   GCTraceCPUTime tcpu;
 210   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 211   GCTraceTime(Info, gc) t(_printGCMessage, g1h->concurrent_mark()->gc_timer_cm(), GCCause::_no_gc, true);
 212   IsGCActiveMark x;
 213   _cl->do_void();
 214 }
 215 
 216 bool VM_CGC_Operation::doit_prologue() {
 217   Heap_lock->lock();
 218   return true;
 219 }
 220 
 221 void VM_CGC_Operation::doit_epilogue() {
 222   if (Universe::has_reference_pending_list()) {
 223     Heap_lock->notify_all();
 224   }
 225   Heap_lock->unlock();
 226 }
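In the new version the pending list lock is gone from the prologue and epilogue; instead, while still holding Heap_lock, doit_epilogue() calls notify_all() when Universe reports a non-empty reference pending list, waking any thread waiting on Heap_lock. Both that notification and the FullGCCount_lock wait loop at the top of the hunk follow the usual monitor discipline: update or observe the condition under the lock that guards it, notify waiters, and have waiters re-check the condition in a loop around wait(). A self-contained sketch of that discipline, using std::mutex/std::condition_variable as stand-ins for HotSpot's internal Monitor and MutexLockerEx (the CycleTracker class and its names are illustrative only):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    // Illustrative stand-ins: _lock plays the role of FullGCCount_lock and
    // _cycles_completed the role of old_marking_cycles_completed().
    class CycleTracker {
      std::mutex                _lock;
      std::condition_variable   _cv;
      std::atomic<unsigned int> _cycles_completed{0};

    public:
      // Waiter side, mirroring the epilogue above: skip the lock entirely if the
      // condition has already been reached, otherwise re-check it in a loop
      // around wait(), since wait() may wake spuriously.
      void wait_until_completed(unsigned int completed_before) {
        if (_cycles_completed.load() > completed_before) {
          return;  // no point taking the lock and waiting
        }
        std::unique_lock<std::mutex> ml(_lock);
        _cv.wait(ml, [&] { return _cycles_completed.load() > completed_before; });
      }

      // Notifier side: bump the counter under the lock and wake all waiters
      // while still holding it, as the new doit_epilogue() does with Heap_lock.
      void cycle_completed() {
        std::lock_guard<std::mutex> ml(_lock);
        _cycles_completed.fetch_add(1);
        _cv.notify_all();
      }
    };

The unlocked fast-path check is only an optimization; correctness comes from re-checking the predicate under the lock, which is why the original code repeats the old_marking_cycles_completed() comparison inside the while loop.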