
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 60421 : [mq]: 8248401-stefank-review

Old:
      CollectedHeap::fill_with_object(mr);
    } else {
      // If we can't allocate once, we probably cannot allocate
      // again. Let's get out of the loop.
      break;
    }
  }
}
#endif // !PRODUCT

void G1CollectedHeap::increment_old_marking_cycles_started() {
  assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
         _old_marking_cycles_started == _old_marking_cycles_completed + 1,
         "Wrong marking cycle count (started: %u, completed: %u)",
         _old_marking_cycles_started, _old_marking_cycles_completed);

  _old_marking_cycles_started++;
}

void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent,
                                                             bool liveness_completed) {
  MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);

  // We assume that if concurrent == true, then the caller is a
  // concurrent thread that has joined the Suspendible Thread
  // Set. If there's ever a cheap way to check this, we should add an
  // assert here.

  // Given that this method is called at the end of a Full GC or of a
  // concurrent cycle, and those can be nested (i.e., a Full GC can
  // interrupt a concurrent cycle), the number of old marking cycles
  // completed should be either one (in the case where there was no
  // nesting) or two (when a Full GC interrupted a concurrent cycle)
  // behind the number of old marking cycles started.

  // This is the case for the inner caller, i.e. a Full GC.
  assert(concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
         "for inner caller (Full GC): _old_marking_cycles_started = %u "
         "is inconsistent with _old_marking_cycles_completed = %u",
         _old_marking_cycles_started, _old_marking_cycles_completed);

  // This is the case for the outer caller, i.e. the concurrent cycle.
  assert(!concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
         "for outer caller (concurrent cycle): "
         "_old_marking_cycles_started = %u "
         "is inconsistent with _old_marking_cycles_completed = %u",
         _old_marking_cycles_started, _old_marking_cycles_completed);

  _old_marking_cycles_completed += 1;
  if (liveness_completed) {
    next_whole_heap_examined();
  }

  // We need to clear the "in_progress" flag in the CM thread before
  // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  // is set) so that if a waiter requests another System.gc() it doesn't
  // incorrectly see that a marking cycle is still in progress.
  if (concurrent) {
    _cm_thread->set_idle();
  }

  // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
  // for a full GC to finish that their wait is over.
  ml.notify_all();
}

void G1CollectedHeap::collect(GCCause::Cause cause) {
  try_collect(cause);
}

New:
      CollectedHeap::fill_with_object(mr);
    } else {
      // If we can't allocate once, we probably cannot allocate
      // again. Let's get out of the loop.
      break;
    }
  }
}
#endif // !PRODUCT

void G1CollectedHeap::increment_old_marking_cycles_started() {
  assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
         _old_marking_cycles_started == _old_marking_cycles_completed + 1,
         "Wrong marking cycle count (started: %u, completed: %u)",
         _old_marking_cycles_started, _old_marking_cycles_completed);

  _old_marking_cycles_started++;
}

void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent,
                                                             bool whole_heap_examined) {
  MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);

  // We assume that if concurrent == true, then the caller is a
  // concurrent thread that has joined the Suspendible Thread
  // Set. If there's ever a cheap way to check this, we should add an
  // assert here.

  // Given that this method is called at the end of a Full GC or of a
  // concurrent cycle, and those can be nested (i.e., a Full GC can
  // interrupt a concurrent cycle), the number of old marking cycles
  // completed should be either one (in the case where there was no
  // nesting) or two (when a Full GC interrupted a concurrent cycle)
  // behind the number of old marking cycles started.

  // This is the case for the inner caller, i.e. a Full GC.
  assert(concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
         "for inner caller (Full GC): _old_marking_cycles_started = %u "
         "is inconsistent with _old_marking_cycles_completed = %u",
         _old_marking_cycles_started, _old_marking_cycles_completed);

  // This is the case for the outer caller, i.e. the concurrent cycle.
  assert(!concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
         "for outer caller (concurrent cycle): "
         "_old_marking_cycles_started = %u "
         "is inconsistent with _old_marking_cycles_completed = %u",
         _old_marking_cycles_started, _old_marking_cycles_completed);

  _old_marking_cycles_completed += 1;
  if (whole_heap_examined) {
    next_whole_heap_examined();
  }

  // We need to clear the "in_progress" flag in the CM thread before
  // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  // is set) so that if a waiter requests another System.gc() it doesn't
  // incorrectly see that a marking cycle is still in progress.
  if (concurrent) {
    _cm_thread->set_idle();
  }

  // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
  // for a full GC to finish that their wait is over.
  ml.notify_all();
}
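
The two asserts above encode a small protocol between the counters: _old_marking_cycles_started may run at most two ahead of _old_marking_cycles_completed, and only while a Full GC has interrupted a concurrent cycle. A minimal standalone model of that protocol (illustrative names, not HotSpot code) walks through the interrupted-cycle scenario:

// Standalone model of the started/completed counter protocol encoded by
// the asserts above. Illustrative sketch only; not HotSpot code.
#include <cassert>
#include <cstdio>

struct CycleCounters {
  unsigned started   = 0;
  unsigned completed = 0;

  void start() {
    // Mirrors increment_old_marking_cycles_started(): a cycle may only
    // begin when zero or exactly one cycle is outstanding.
    assert(started == completed || started == completed + 1);
    started++;
  }

  void complete(bool concurrent) {
    if (concurrent) {
      // Outer caller (concurrent cycle): exactly one cycle outstanding.
      assert(started == completed + 1);
    } else {
      // Inner caller (Full GC): one outstanding, or two if the Full GC
      // interrupted a concurrent cycle.
      assert(started == completed + 1 || started == completed + 2);
    }
    completed++;
  }
};

int main() {
  CycleCounters c;
  c.start();          // concurrent cycle begins
  c.start();          // Full GC interrupts it; started == completed + 2
  c.complete(false);  // Full GC finishes (inner caller)
  c.complete(true);   // aborted concurrent cycle finishes (outer caller)
  printf("started=%u completed=%u\n", c.started, c.completed);
  return 0;
}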

void G1CollectedHeap::collect(GCCause::Cause cause) {
  try_collect(cause);
}
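
The ml.notify_all() in increment_old_marking_cycles_completed() pairs with code that waits on G1OldGCCount_lock for _old_marking_cycles_completed to advance, e.g. a System.gc() caller under -XX:+ExplicitGCInvokesConcurrent going through try_collect(). A simplified sketch of that wait/notify protocol, substituting standard C++ primitives for HotSpot's MonitorLocker and using illustrative names:

// Simplified sketch of the waiter side that notify_all() wakes up.
// HotSpot uses G1OldGCCount_lock with MonitorLocker; this sketch
// substitutes std::mutex/std::condition_variable and invented names.
#include <condition_variable>
#include <mutex>

class OldGCCountMonitor {
  std::mutex              _lock;
  std::condition_variable _cv;
  unsigned                _completed = 0;

public:
  // Caller side: snapshot the completed count, request a cycle, then
  // block until the count moves past the snapshot.
  void wait_for_next_cycle() {
    std::unique_lock<std::mutex> ml(_lock);
    unsigned completed_before = _completed;
    // (requesting/starting the concurrent cycle omitted)
    _cv.wait(ml, [&] { return _completed > completed_before; });
  }

  // GC side: the analogue of increment_old_marking_cycles_completed(),
  // bumping the count under the lock and waking every waiter.
  void cycle_completed() {
    {
      std::lock_guard<std::mutex> ml(_lock);
      _completed++;
    }
    _cv.notify_all();
  }
};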

