/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = heap->is_traversal_mode() ? concurrent_traversal : concurrent_normal;
  GCCause::Cause default_cause = default_mode == concurrent_traversal ? GCCause::_shenandoah_traversal_gc : GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we detect regions
  // eligible for shrinking with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
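  // Worked example (illustrative only; assumes a hypothetical delay of one minute):
  //   ShenandoahUncommitDelay = 60000 msecs
  //   shrink_period = 60000 / 1000 / 10 = 6.0 seconds between polls,
  //   so an empty region is uncommitted at most ~6 seconds later than configured.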
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen);

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow all soft references on this cycle, if handling allocation failure,
    // or we are requested to do so unconditionally.
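    // Note: this flag stays set for the whole cycle and is retracted once the cycle
    // completes (see "Retract forceful part of soft refs policy" below), so only the
    // current cycle clears soft references aggressively.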
    if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
    }

    switch (mode) {
      case none:
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    if (gc_requested) {
      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Allow allocators to know how much allocation we have seen
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
      // Regular paths uncommit only occasionally.
      double shrink_before = explicit_gc_requested ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);
      service_uncommit(shrink_before);
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
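    // Sketch of the backoff under assumed default settings (ShenandoahControlIntervalMin = 1,
    // ShenandoahControlIntervalMax = 10, ShenandoahControlIntervalAdjustPeriod = 1000):
    // an idle control thread sleeps 1, 2, 4, 8, 10, 10, ... msecs, doubling once per
    // adjust period, and snaps back to the minimum as soon as _heap_changed is flagged.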
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming cycle
  heap->entry_reset();

  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup();

  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If second allocation failure happens during Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), cycle degrades to Full GC.
  //
  // There are also two shortcuts through the normal cycle: a) immediate garbage shortcut, when
  // heuristics says there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions; b) coalesced UR shortcut, when heuristics decides to coalesce UR with the
  // mark from the next cycle.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                       (coalesced UR)      v
  //                             |                  /----------------------->o
  //                             |                  |                        |
  //                             |                  |                        v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
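  // In code terms, each (af) arrow above corresponds to check_cancellation_or_degen()
  // observing a cancelled GC at _degenerated_mark, _degenerated_evac, or
  // _degenerated_updaterefs; the next control loop iteration consumes that point and
  // runs service_stw_degenerated_cycle() from there.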
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Evacuate concurrent roots
  heap->entry_roots();

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase, if required. This phase can be skipped if heuristics
    // decides to piggy-back the update-refs on the next marking cycle. On either path,
    // we need to turn off evacuation: either in init-update-refs, or in final-evac.
    if (heap->heuristics()->should_start_update_refs()) {
      heap->vmop_entry_init_updaterefs();
      heap->entry_updaterefs();
      if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

      heap->vmop_entry_final_updaterefs();

      // Update references freed up collection set, kick the cleanup to reclaim the space.
      heap->entry_cleanup();

    } else {
      heap->vmop_entry_final_evac();
    }
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}
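// A full STW cycle is the terminal fallback: the control loop above selects it when a
// degenerated cycle is not allowed (ShenandoahDegeneratedGC is off, or heuristics veto
// it), or when an explicit/implicit request does not invoke the concurrent mode.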
void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= heap->min_capacity()) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  _requested_gc_cause = cause;
  _gc_requested.set();
  MonitorLocker ml(&_gc_waiters_lock);
  while (_gc_requested.is_set()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}
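// Unlike handle_alloc_failure() above, this path does not block the caller waiting for
// a GC cycle: it only records the first failure and forcefully cancels the concurrent
// GC in progress, reporting it via GCCause::_shenandoah_allocation_failure_evac.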
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}