/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
  create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}
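// The periodic tasks below are enrolled in the constructor and run off the VM's
// WatcherThread at fixed intervals, independently of the control loop in
// run_service(): counters updates, SATB buffer flushes, and (when pacing is
// enabled) pacer wait notifications.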
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we hit the
  // shrinking with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0);

    // Choose which GC mode to run in. The block below should select a single mode.
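    // Precedence order: allocation failure is handled first, then explicit GC
    // requests, then implicit (JVM-internal) requests, and finally a normal
    // heuristics-driven concurrent cycle if nothing else is pending.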
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear out all soft references this cycle when handling an allocation failure,
    // an implicit or explicit GC request, or when asked to do so unconditionally
    // via ShenandoahAlwaysClearSoftRefs.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      // Use default constructor to snapshot the Metaspace state before GC.
      metaspace::MetaspaceSizesSnapshot meta_sizes;

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      heap->next_whole_heap_examined();

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Allow allocators to know we have seen this many allocations
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
      // Regular paths uncommit only occasionally.
      double shrink_before = explicit_gc_requested ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);
      service_uncommit(shrink_before);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }
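    // For example, with ShenandoahUncommitDelay at its default of 5 minutes, a
    // regular iteration uncommits only regions that have sat empty for at least
    // 5 minutes (shrink_before = now - delay), while an explicit GC passes
    // shrink_before = now, making every empty committed region eligible.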
    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // A normal cycle goes through all concurrent phases. If allocation failure (af) happens
  // during any of the concurrent phases, it first degrades to Degenerated GC and completes
  // GC there. If a second allocation failure happens during the Degenerated GC cycle (for
  // example, when GC tries to evac something and no memory is available), the cycle degrades
  // to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
  // heuristics say there are no regions to compact, and all the collection comes from
  // immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    heap->entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->is_concurrent_weak_root_in_progress()) {
    heap->entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    heap->entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase.
    heap->vmop_entry_init_updaterefs();
    heap->entry_updaterefs();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

    heap->vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    heap->entry_cleanup_complete();
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= heap->min_capacity()) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.

  MonitorLocker ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
  while (current_gc_id < required_gc_id) {
    _gc_requested.set();
    _requested_gc_cause = cause;
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}
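// Allocation failure protocol, in brief: a mutator failing a heap allocation races
// to set _alloc_failure_gc (only the winner logs and cancels the concurrent cycle),
// then every failing thread blocks on _alloc_failure_waiters_lock until the control
// thread finishes a Degenerated or Full GC and calls notify_alloc_failure_waiters()
// above. Evacuation failures only cancel the cycle, without waiting.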
bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}