
src/share/vm/gc/g1/vm_operations_g1.cpp

--- old/src/share/vm/gc/g1/vm_operations_g1.cpp
 151                                       true /* expect_null_cur_alloc_region */);
 152   } else {
 153     assert(_result == NULL, "invariant");
 154     if (!_pause_succeeded) {
 155       // Another possible reason for the pause to not be successful
 156       // is that, again, the GC locker is active (and has become active
 157       // since the prologue was executed). In this case we should retry
 158       // the pause after waiting for the GC locker to become inactive.
 159       _should_retry_gc = true;
 160     }
 161   }
 162 }
 163 
 164 void VM_G1IncCollectionPause::doit_epilogue() {
 165   VM_G1OperationWithAllocRequest::doit_epilogue();
 166 
 167   // If the pause was initiated by a System.gc() and
 168   // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
 169   // that just started (or maybe one that was already in progress) to
 170   // finish.
 171   if (_gc_cause == GCCause::_java_lang_system_gc &&
 172       _should_initiate_conc_mark) {
 173     assert(ExplicitGCInvokesConcurrent,
 174            "the only way to be here is if ExplicitGCInvokesConcurrent is set");
 175 
 176     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 177 
 178     // In the doit() method we saved g1h->old_marking_cycles_completed()
 179     // in the _old_marking_cycles_completed_before field. We have to
 180     // wait until we observe that g1h->old_marking_cycles_completed()
 181     // has increased by at least one. This can happen if a) we started
 182     // a cycle and it completes, b) a cycle already in progress
 183     // completes, or c) a Full GC happens.
 184 
 185     // If the condition has already been reached, there's no point in
 186     // actually taking the lock and doing the wait.
 187     if (g1h->old_marking_cycles_completed() <=
 188                                           _old_marking_cycles_completed_before) {
 189       // The following is largely copied from CMS
 190 
 191       Thread* thr = Thread::current();

+++ new/src/share/vm/gc/g1/vm_operations_g1.cpp
 151                                       true /* expect_null_cur_alloc_region */);
 152   } else {
 153     assert(_result == NULL, "invariant");
 154     if (!_pause_succeeded) {
 155       // Another possible reason for the pause to not be successful
 156       // is that, again, the GC locker is active (and has become active
 157       // since the prologue was executed). In this case we should retry
 158       // the pause after waiting for the GC locker to become inactive.
 159       _should_retry_gc = true;
 160     }
 161   }
 162 }
 163 
 164 void VM_G1IncCollectionPause::doit_epilogue() {
 165   VM_G1OperationWithAllocRequest::doit_epilogue();
 166 
 167   // If the pause was initiated by a System.gc() and
 168   // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
 169   // that just started (or maybe one that was already in progress) to
 170   // finish.
 171   if (GCCause::is_user_requested_gc(_gc_cause) &&
 172       _should_initiate_conc_mark) {
 173     assert(ExplicitGCInvokesConcurrent,
 174            "the only way to be here is if ExplicitGCInvokesConcurrent is set");
 175 
 176     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 177 
 178     // In the doit() method we saved g1h->old_marking_cycles_completed()
 179     // in the _old_marking_cycles_completed_before field. We have to
 180     // wait until we observe that g1h->old_marking_cycles_completed()
 181     // has increased by at least one. This can happen if a) we started
 182     // a cycle and it completes, b) a cycle already in progress
 183     // completes, or c) a Full GC happens.
 184 
 185     // If the condition has already been reached, there's no point in
 186     // actually taking the lock and doing the wait.
 187     if (g1h->old_marking_cycles_completed() <=
 188                                           _old_marking_cycles_completed_before) {
 189       // The following is largely copied from CMS
 190 
 191       Thread* thr = Thread::current();
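
The only functional change between the two frames is the guard at line 171:
the old code waited for the concurrent cycle only when the pause was caused
by an explicit System.gc(), while the new code asks
GCCause::is_user_requested_gc(), so every user-requested cause takes the
same wait path. For reference, a minimal sketch of that predicate as it
plausibly reads in gcCause.hpp at this revision (the exact set of causes it
covers is an assumption, not taken from this webrev):

    // Assumed shape of GCCause::is_user_requested_gc() in gcCause.hpp;
    // _dcmd_gc_run is the cause recorded for the jcmd GC.run command.
    inline static bool is_user_requested_gc(GCCause::Cause cause) {
      return (cause == GCCause::_java_lang_system_gc ||
              cause == GCCause::_dcmd_gc_run);
    }

With the wider predicate, a cycle started on behalf of such a request under
+ExplicitGCInvokesConcurrent is also waited for in doit_epilogue(), instead
of control returning to the requester while marking is still in progress.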
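
The _should_retry_gc flag set at line 159 is consumed by the caller of this
VM operation rather than by the operation itself. A hedged sketch of the
retry loop on the G1CollectedHeap::collect() side, assuming the usual
GC_locker protocol of this JDK line (argument names are illustrative):

    // Sketch (assumed caller): re-issue the pause once the GC locker
    // clears, since the pause can fail while JNI critical sections
    // are still active.
    bool retry_gc;
    do {
      VM_G1IncCollectionPause op(gc_count_before, 0 /* word_size */,
                                 should_initiate_conc_mark,
                                 g1_policy()->max_pause_time_ms(),
                                 cause);
      VMThread::execute(&op);
      retry_gc = op.should_retry_gc();
      if (retry_gc && GC_locker::is_active_and_needs_gc()) {
        GC_locker::stall_until_clear();
      }
    } while (retry_gc);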
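
Both frames cut off at line 191, just as the epilogue's wait begins. For
orientation, a sketch of how that wait typically proceeds, assuming the
CMS-style FullGCCount_lock protocol the comment at line 189 refers to (the
actual continuation lies past the cut and is not reproduced here):

    // Sketch (assumed continuation): leave the in-VM state, then block
    // on FullGCCount_lock until at least one more old marking cycle has
    // completed than was recorded in doit().
    JavaThread* jt = (JavaThread*)thr;
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    while (g1h->old_marking_cycles_completed() <=
           _old_marking_cycles_completed_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }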