src/share/vm/gc/g1/vm_operations_g1.cpp

 188     // actually taking the lock and doing the wait.
 189     if (g1h->old_marking_cycles_completed() <=
 190                                           _old_marking_cycles_completed_before) {
 191       // The following is largely copied from CMS
 192 
 193       Thread* thr = Thread::current();
 194       assert(thr->is_Java_thread(), "invariant");
 195       JavaThread* jt = (JavaThread*)thr;
 196       ThreadToNativeFromVM native(jt);
 197 
 198       MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 199       while (g1h->old_marking_cycles_completed() <=
 200                                           _old_marking_cycles_completed_before) {
 201         FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
 202       }
 203     }
 204   }
 205 }
 206 
 207 void VM_CGC_Operation::acquire_pending_list_lock() {
 208   assert(_needs_pending_list_lock, "don't call this otherwise");
 209   _pending_list_locker.lock();
 210 }
 211 
 212 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
 213   assert(_needs_pending_list_lock, "don't call this otherwise");
 214   _pending_list_locker.unlock();
 215 }
 216 
 217 void VM_CGC_Operation::doit() {
 218   GCIdMark gc_id_mark(_gc_id);
 219   GCTraceCPUTime tcpu;
 220   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 221   GCTraceTime(Info, gc) t(_printGCMessage, g1h->gc_timer_cm(), GCCause::_no_gc, true);
 222   IsGCActiveMark x;
 223   _cl->do_void();
 224 }
 225 
 226 bool VM_CGC_Operation::doit_prologue() {
 227   // Note the relative order of the locks must match that in
 228   // VM_GC_Operation::doit_prologue() or deadlocks can occur
 229   if (_needs_pending_list_lock) {
 230     acquire_pending_list_lock();
 231   }
 232   Heap_lock->lock();
 233   return true;


 188     // actually taking the lock and doing the wait.
 189     if (g1h->old_marking_cycles_completed() <=
 190                                           _old_marking_cycles_completed_before) {
 191       // The following is largely copied from CMS
 192 
 193       Thread* thr = Thread::current();
 194       assert(thr->is_Java_thread(), "invariant");
 195       JavaThread* jt = (JavaThread*)thr;
 196       ThreadToNativeFromVM native(jt);
 197 
 198       MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
 199       while (g1h->old_marking_cycles_completed() <=
 200                                           _old_marking_cycles_completed_before) {
 201         FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
 202       }
 203     }
 204   }
 205 }
 206 
 207 void VM_CGC_Operation::acquire_pending_list_lock() {

 208   _pending_list_locker.lock();
 209 }
 210 
 211 void VM_CGC_Operation::release_and_notify_pending_list_lock() {

 212   _pending_list_locker.unlock();
 213 }
 214 
 215 void VM_CGC_Operation::doit() {
 216   GCIdMark gc_id_mark(_gc_id);
 217   GCTraceCPUTime tcpu;
 218   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 219   GCTraceTime(Info, gc) t(_printGCMessage, g1h->gc_timer_cm(), GCCause::_no_gc, true);
 220   IsGCActiveMark x;
 221   _cl->do_void();
 222 }
 223 
 224 bool VM_CGC_Operation::doit_prologue() {
 225   // Note the relative order of the locks must match that in
 226   // VM_GC_Operation::doit_prologue() or deadlocks can occur
 227   if (_needs_pending_list_lock) {
 228     acquire_pending_list_lock();
 229   }
 230   Heap_lock->lock();
 231   return true;
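
For reference, below is a minimal standalone sketch (not HotSpot code) of the wait pattern used in doit_epilogue() above: the requester samples the completed-cycle counter before requesting the cycle, then blocks on a monitor and re-checks the condition in a loop until the counter advances. The names here (full_gc_count_lock, wait_for_next_cycle, signal_cycle_completed) are illustrative stand-ins, assuming standard C++ threading rather than the VM's FullGCCount_lock / MutexLockerEx machinery.

// Illustration only: standard C++ equivalent of the wait-until-counter-advances
// pattern; names are hypothetical and do not exist in the JDK sources.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex              full_gc_count_lock;            // stands in for FullGCCount_lock
static std::condition_variable full_gc_count_cv;
static unsigned                old_marking_cycles_completed = 0;

// Requester side: wait until the counter moves past the value sampled
// before the cycle was requested.
void wait_for_next_cycle(unsigned completed_before) {
  std::unique_lock<std::mutex> lock(full_gc_count_lock);
  // Re-check the condition in a loop, as the epilogue does, to tolerate
  // spurious wakeups and notifications meant for other waiters.
  full_gc_count_cv.wait(lock, [&] {
    return old_marking_cycles_completed > completed_before;
  });
}

// Marker side: bump the counter under the lock, then wake all waiters
// once a marking cycle has completed.
void signal_cycle_completed() {
  {
    std::lock_guard<std::mutex> lock(full_gc_count_lock);
    ++old_marking_cycles_completed;
  }
  full_gc_count_cv.notify_all();
}

int main() {
  unsigned before = old_marking_cycles_completed;   // sample before the "cycle" starts
  std::thread marker(signal_cycle_completed);
  wait_for_next_cycle(before);
  std::printf("cycle %u completed\n", old_marking_cycles_completed);
  marker.join();
  return 0;
}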