/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

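// Periodic task: update the GC monitoring counters, either forcefully (while a GC
// cycle is in progress) or when an update has been requested from the allocation path.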
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

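// Periodic task: force all threads to flush their SATB buffers, so that concurrent
// marking can make progress even when mutator buffers fill up slowly.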
void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = heap->is_traversal_mode() ?
                        concurrent_traversal : concurrent_normal;
  GCCause::Cause default_cause = heap->is_traversal_mode() ?
                                 GCCause::_shenandoah_traversal_gc : GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we detect shrink-eligible
  // regions with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
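  // Main control loop: on each iteration, decide whether a GC cycle should run and in
  // which mode (concurrent, degenerated, or full), execute it, then sleep until the
  // next evaluation.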
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0);

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask heuristics if this cycle should process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear out all soft references for this cycle if we are handling an allocation
    // failure, or if we are requested to do so unconditionally.
    if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
    }

    switch (mode) {
      case none:
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    if (gc_requested) {
      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report the allocations we have seen during this interval to the pacer
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
      // Regular paths uncommit only occasionally.
      double shrink_before = explicit_gc_requested ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);
      service_uncommit(shrink_before);
      last_shrink_time = current;
    }

    // Wait before performing the next action. If the heap changed during the wait
    // (e.g. an allocation took a new region), drop back to the minimal sleep interval,
    // so that heuristics can re-evaluate the new conditions sooner. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

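// Traversal GC: a single concurrent pass that marks, evacuates and updates references
// together, framed by the init-traversal and final-traversal safepoints, and followed
// by concurrent cleanup.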
void ShenandoahControlThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming cycle
  heap->entry_reset();

  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup();

  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to a Degenerated
  // GC and completes the GC there. If a second allocation failure happens during the
  // Degenerated GC cycle (for example, when GC tries to evacuate something and no memory
  // is available), the cycle degrades to a Full GC.
  //
  // There are also two shortcuts through the normal cycle: a) the immediate garbage shortcut,
  // when heuristics says there are no regions to compact, and all the collection comes from
  // immediately reclaimable regions; b) the coalesced UR shortcut, when heuristics decides to
  // coalesce UR (update references) with the mark of the next cycle.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                   (coalesced UR)          v
  //                             |                /------------------------->o
  //                             |                |                          |
  //                             |                |                          v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Evacuate concurrent roots
  heap->entry_roots();

  // Final mark might have reclaimed some immediate garbage; kick off cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase, if required. This phase can be skipped if heuristics
    // decides to piggy-back the update-refs on the next marking cycle. On either path,
    // we need to turn off evacuation: either in init-update-refs, or in final-evac.
    if (heap->heuristics()->should_start_update_refs()) {
      heap->vmop_entry_init_updaterefs();
      heap->entry_updaterefs();
      if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

      heap->vmop_entry_final_updaterefs();

      // Update-refs has freed up the collection set; kick off cleanup to reclaim the space.
      heap->entry_cleanup();

    } else {
      heap->vmop_entry_final_evac();
    }
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

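// Check whether the GC has been cancelled. On cancellation due to allocation failure,
// remember the point the concurrent cycle has reached, so the degenerated cycle can
// continue from there. Returns true if the caller should bail out of the cycle.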
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

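// Run a stop-the-world Full GC for the given cause.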
void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

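// Run a stop-the-world Degenerated GC, resuming from the point where the
// concurrent cycle was cancelled.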
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

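// Uncommit heap regions that have been empty since before the given time.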
void ShenandoahControlThread::service_uncommit(double shrink_before) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= heap->min_capacity()) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

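// Post the GC request for the control thread and block the caller until the
// requested cycle has run and the waiters have been notified.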
void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  _requested_gc_cause = cause;
  _gc_requested.set();
  MonitorLocker ml(&_gc_waiters_lock);
  while (_gc_requested.is_set()) {
    ml.wait();
  }
}

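// Called by a Java thread that failed to allocate: schedule an allocation failure GC,
// cancel any concurrent cycle in progress, and block until the failure has been handled.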
void ShenandoahControlThread::handle_alloc_failure(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

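// Called when evacuation fails to allocate: cancel the concurrent cycle so that it
// can degenerate. Unlike handle_alloc_failure(), this does not block the caller.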
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

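// Update monitoring counters if an update has been requested from the allocation path.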
void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

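// Update monitoring counters unconditionally while forced updates are in effect
// (i.e. while a GC cycle is running).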
void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

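// Called from the allocation path when pacing is enabled: accumulate allocated words
// so the control thread can report them to the pacer.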
void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}
--- EOF ---