/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
  create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we detect an expired
  // region with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
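  // For example, an uncommit delay of 300000 ms (5 minutes) yields a
  // shrink period of 300000 / 1000 / 10 = 30 seconds, so an expired
  // region is picked up at most ~30 seconds late.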
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen);

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }
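
    // At this point exactly one mode is selected: allocation failure takes
    // precedence over explicit and implicit requests, which in turn take
    // precedence over heuristics-initiated cycles. If nothing is pending
    // and heuristics stay quiet, the mode remains "none".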

    // Blow all soft references on this cycle, if handling allocation failure,
    // or we are requested to do so unconditionally.
    if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      // Capture metaspace usage before GC.
      const size_t metadata_prev_used = MetaspaceUtils::used_bytes();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we should dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case none:
          break;
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(metadata_prev_used);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Let the pacer know how many allocations we have seen
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
      // Regular paths uncommit only occasionally.
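      // For example, with ShenandoahUncommitDelay = 300000 ms, the regular
      // path sets the watermark 5 minutes into the past: only regions that
      // have been empty since before that point are uncommitted.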
      double shrink_before = explicit_gc_requested ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);
      service_uncommit(shrink_before);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
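    // For instance, with a minimum interval of 1 ms and a maximum of 10 ms,
    // an idle control thread stretches its sleep 1 -> 2 -> 4 -> 8 -> 10 ms,
    // and snaps back to the minimum as soon as the heap changes.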
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken
  // when heuristics say there are no regions to compact, and all the collection comes from
  // immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase.
    heap->vmop_entry_init_updaterefs();
    heap->entry_updaterefs();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

    heap->vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    heap->entry_cleanup_complete();
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.
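  // Nothing can be uncommitted below the minimum committed heap size, so
  // bail right away when the heap is already at it. Otherwise, one linear
  // pass over the region table is enough to tell whether any committed
  // region has been empty since before the watermark.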

  if (heap->committed() <= heap->min_capacity()) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.
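  //
  // For example, a request arriving while cycle N is in flight sees
  // get_gc_id() == N and waits for N + 1. The ID is bumped when the next
  // cycle starts, and waiters are notified when that cycle completes, so
  // at least one full cycle runs after the request.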
  size_t required_gc_id = get_gc_id() + 1;

  MonitorLockerEx ml(&_gc_waiters_lock);
  while (get_gc_id() < required_gc_id) {
    _gc_requested.set();
    _requested_gc_cause = cause;
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLockerEx ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  OrderAccess::release_store_fence(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return OrderAccess::load_acquire(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}