 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeuristics.hpp"
#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahTraversalGC.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
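
// MSVC warning C4355 ("'this' : used in base member initializer list") fires for
// the _periodic_task(this) initializer in the constructor below; the usage is
// safe, so the warning is disabled.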
#ifdef _WINDOWS
#pragma warning(disable : 4355)
#endif

SurrogateLockerThread* ShenandoahControlThread::_slt = NULL;

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {
  // ... (rest of constructor elided)
}
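
// Periodic SATB flush task: forces every Java thread to flush its thread-local
// SATB buffer, so that concurrent marking sees recently recorded writes promptly
// instead of waiting for the buffers to fill up.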
void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

void ShenandoahControlThread::run() {
  initialize_in_thread();

  wait_for_universe_init();

  // Wait until we have the surrogate locker thread in place; it acquires the
  // reference pending-list lock on behalf of this (non-Java) control thread.
  {
    MutexLockerEx x(CGC_lock, true);
    while (_slt == NULL && !_should_terminate) {
      CGC_lock->wait(true, 200);
    }
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = heap->is_traversal_mode() ?
          concurrent_traversal : concurrent_normal;
  GCCause::Cause default_cause = heap->is_traversal_mode() ?
          GCCause::_shenandoah_traversal_gc : GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we detect shrinkable
  // regions with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
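  // (For example, a ShenandoahUncommitDelay of 5000 ms yields a shrink_period of 0.5 s.)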

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();

  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !_should_terminate) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // ... (elided: from these flags and the heuristics, this part of the loop
    // selects 'mode', 'cause' and 'degen_point' for the current iteration)

    bool gc_requested = (mode != none);
    assert(!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If GC was requested, sample the counters even without actual triggers
      // from the allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, dump the free set data for performance debugging.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
    }

    switch (mode) {
      case none:
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    if (gc_requested) {
      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      // ... (elided: allocation-failure waiter notification, counter updates,
      // and the periodic uncommit check that defines 'current')
      last_shrink_time = current;
    }

    // Wait before performing the next action. If an allocation happened during
    // this wait, we exit sooner, to let the heuristics re-evaluate the new
    // conditions. If we are idle, back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run() earlier.
  while (!_should_terminate) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
  terminate();
}
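
// Traversal GC performs marking, evacuation, and reference updates in a single
// concurrent pass; it is selected as the default mode when the heap runs in
// traversal mode (see default_mode in run() above).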
void ShenandoahControlThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming cycle
  heap->entry_reset();

  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup();

  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to a
  // Degenerated GC and completes the collection there. If a second allocation failure
  // happens during the Degenerated GC cycle (for example, when GC tries to evacuate
  // something and no memory is available), the cycle degrades to a Full GC.
  //
  // There are also two shortcuts through the normal cycle: a) the immediate garbage
  // shortcut, when the heuristics determine there are no regions to compact, and all
  // the collection comes from immediately reclaimable regions; b) the coalesced UR
  // (update-references) shortcut, when the heuristics decide to coalesce UR with the
  // mark of the next cycle.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                       (coalesced UR)      v
  //                             |                  /----------------------->o
  //                             |                  |                        |
  //                             |