/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeuristics.hpp"
#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahTraversalGC.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

#ifdef _WINDOWS
#pragma warning(disable : 4355)
#endif

SurrogateLockerThread* ShenandoahControlThread::_slt = NULL;

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  if (os::create_thread(this, os::cgc_thread)) {
    int native_prio;
    if (ShenandoahCriticalControlThreadPriority) {
      native_prio = os::java_to_os_priority[CriticalPriority];
    } else {
      native_prio = os::java_to_os_priority[NearMaxPriority];
    }
    os::set_native_priority(this, native_prio);
    if (!_should_terminate && !DisableStartThread) {
      os::start_thread(this);
    }
  }

  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}
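
// A note on the two periodic tasks enrolled by the constructor:
// ShenandoahPeriodicTask moves monitoring counter updates off the allocation
// path (see handle_counters_update() and notify_heap_changed() below), and
// ShenandoahPeriodicSATBFlushTask periodically flushes per-thread SATB
// buffers, so that concurrent marking also observes updates from threads
// that rarely fill their buffers on their own.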

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

void ShenandoahControlThread::run() {
  initialize_in_thread();

  wait_for_universe_init();

  // Wait until we have the surrogate locker thread in place.
  {
    MutexLockerEx x(CGC_lock, true);
    while (_slt == NULL && !_should_terminate) {
      CGC_lock->wait(true, 200);
    }
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = heap->is_traversal_mode() ?
                        concurrent_traversal : concurrent_normal;
  GCCause::Cause default_cause = heap->is_traversal_mode() ?
                                 GCCause::_shenandoah_traversal_gc : GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we detect shrinking
  // opportunities with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds;
  // for example, ShenandoahUncommitDelay=300000 (5 min) gives a 30 s period.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();

  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !_should_terminate) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() &&  is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    intptr_t allocs_seen = (intptr_t)(Atomic::xchg_ptr(0, &_allocs_seen));

    // Choose which GC mode to run in. The block below should select a single mode.
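    // Trigger precedence: allocation failure is handled first (mutators are
    // already blocked waiting on it), then explicit and implicit GC requests,
    // and only then a regular heuristics-driven cycle.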
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear out all soft references on this cycle if we are handling an
    // allocation failure, or if we are requested to do so unconditionally.
    if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) {
      heap->collector_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from the allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
    }

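    // Run the selected cycle. Each service_* call below drives one complete
    // GC cycle and returns on normal completion or after cancellation; a
    // concurrent cycle cancelled on allocation failure records its
    // degeneration point and is picked up as an allocation-failure GC on the
    // next loop iteration.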
    switch (mode) {
      case none:
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    if (gc_requested) {
      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we should report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of the GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract the forceful part of the soft refs policy
      heap->collector_policy()->set_should_clear_all_soft_refs(false);

      // Clear the metaspace OOM flag, if the current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to the pacer how much allocation we have seen this iteration
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
      // Try to uncommit stale regions. Explicit GC tries to uncommit everything;
      // regular paths uncommit only occasionally.
      double shrink_before = explicit_gc_requested ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);
      service_uncommit(shrink_before);
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate the new conditions. If we are at idle,
    // back off exponentially: the sleep interval doubles each adjustment period, from
    // ShenandoahControlIntervalMin up to ShenandoahControlIntervalMax.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }
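
  // At this point we are either terminating or in graceful shutdown. stop()
  // sets _should_terminate, wakes this thread through CGC_lock, and then
  // blocks on Terminator_lock until _has_terminated is set; terminate()
  // below completes that handshake.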

  // Wait for the actual stop(); we cannot leave run() earlier.
  while (!_should_terminate) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
  terminate();
}

void ShenandoahControlThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for the upcoming cycle
  heap->entry_reset();

  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup();

  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes via all concurrent phases. If allocation failure (af) happens
  // during any of the concurrent phases, the cycle first degrades to Degenerated GC and
  // completes GC there. If a second allocation failure happens during the Degenerated GC
  // cycle (for example, when GC tries to evacuate something and no memory is available),
  // the cycle degrades to Full GC.
  //
  // There are also two shortcuts through the normal cycle: a) the immediate garbage shortcut,
  // when heuristics say there are no regions to compact, and all the collection comes from
  // immediately reclaimable regions; b) the coalesced UR (update-refs) shortcut, when
  // heuristics decide to coalesce UR with the mark from the next cycle.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                       (coalesced UR)      v
  //                             |                  /----------------------->o
  //                             |                  |                        |
  //                             |                  |                        v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by the collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform the update-refs phase, if required. This phase can be skipped if heuristics
    // decide to piggy-back the update-refs on the next marking cycle. On either path,
    // we need to turn off evacuation: either in init-update-refs, or in final-evac.
    if (heap->heuristics()->should_start_update_refs()) {
      heap->vmop_entry_init_updaterefs();
      heap->entry_updaterefs();
      if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

      heap->vmop_entry_final_updaterefs();

      // Update-refs has freed up the collection set; kick the cleanup to reclaim the space.
      heap->entry_cleanup();

    } else {
      heap->vmop_entry_final_evac();
    }
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              err_msg("Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point)));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop() {
  {
    MutexLockerEx ml(Terminator_lock);
    _should_terminate = true;
  }

  {
    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
    CGC_lock->notify_all();
  }

  {
    MutexLockerEx ml(Terminator_lock);
    while (!_has_terminated) {
      Terminator_lock->wait();
    }
  }
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause);

  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGCSession session(cause);

  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}
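
// The region scan in service_uncommit() below runs without the heap lock:
// a stale read can only delay uncommit or suggest work that is no longer
// there, and entry_uncommit() re-examines regions before actually releasing
// memory.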

void ShenandoahControlThread::service_uncommit(double shrink_before) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking the heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= heap->min_capacity()) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_shenandoah_metadata_gc_clear_softrefs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  _requested_gc_cause = cause;
  _gc_requested.set();
  MonitorLockerEx ml(&_gc_waiters_lock);
  while (_gc_requested.is_set()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLockerEx ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}
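
// Unlike handle_counters_update() above, which only runs when an allocation
// has flagged an update (see notify_heap_changed), the forced variant below
// updates the counters on every periodic tick for as long as forced updates
// are enabled (see set_forced_counters_update), i.e. while a GC cycle runs.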
void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::makeSurrogateLockerThread(TRAPS) {
  assert(UseShenandoahGC, "SLT thread needed only for concurrent GC");
  assert(THREAD->is_Java_thread(), "must be a Java thread");
  assert(_slt == NULL, "SLT already created");
  _slt = SurrogateLockerThread::make(THREAD);
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}
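
// Usage sketch (illustrative only; the helper below is hypothetical, not code
// from this repository): a Java thread that fails to allocate is expected to
// block in handle_alloc_failure() until the control thread completes the
// alloc-failure GC and calls notify_alloc_failure_waiters(). Roughly:
//
//   HeapWord* result = try_allocate(words);            // hypothetical slow-path helper
//   if (result == NULL) {
//     control_thread()->handle_alloc_failure(words);   // blocks until GC is done
//     result = try_allocate(words);                    // retry after the GC
//   }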