/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

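// Periodic task that pushes out pending counter updates: both the forced ones
// and the ones requested via notify_heap_changed().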
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

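// Periodic task that forces SATB buffer flushes on all mutator threads, so that
// updates sitting in partially filled buffers become visible to the collector.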
void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

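// Main loop of the control thread: decides whether a GC cycle should run and in
// which mode (concurrent, degenerated, or full), runs it, then services waiters,
// counters, uncommits, and the sleep back-off.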
void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = heap->is_traversal_mode() ?
                           concurrent_traversal : concurrent_normal;
  GCCause::Cause default_cause = heap->is_traversal_mode() ?
                           GCCause::_shenandoah_traversal_gc : GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we detect regions
  // eligible for shrinking with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() &&  is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen);

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask heuristics if this cycle should process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow away all soft references on this cycle, if handling an allocation
    // failure, or if we are requested to do so unconditionally.
    if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
    }

    switch (mode) {
      case none:
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    if (gc_requested) {
      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Let the pacer know how much allocation we have seen
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
      // Regular paths uncommit only occasionally.
      double shrink_before = explicit_gc_requested ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);
      service_uncommit(shrink_before);
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

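// Traversal GC does the bulk of its work in a single concurrent traversal phase,
// bracketed by the init- and final-traversal safepoints, followed by cleanup.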
void ShenandoahControlThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming cycle
  heap->entry_reset();

  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup();

  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens
  // during any of the concurrent phases, the cycle first degrades to Degenerated GC
  // and completes there. If a second allocation failure happens during the Degenerated
  // GC cycle (for example, when GC tries to evac something and no memory is available),
  // the cycle degrades to Full GC.
  //
  // There are also two shortcuts through the normal cycle: a) the immediate garbage
  // shortcut, when heuristics determines there are no regions to compact, and all the
  // collection comes from immediately reclaimable regions; b) the coalesced UR shortcut,
  // when heuristics decides to coalesce UR with the mark from the next cycle.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                       (coalesced UR)      v
  //                             |                  /----------------------->o
  //                             |                  |                        |
  //                             |                  |                        v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase, if required. This phase can be skipped if heuristics
    // decides to piggy-back the update-refs on the next marking cycle. On either path,
    // we need to turn off evacuation: either in init-update-refs, or in final-evac.
    if (heap->heuristics()->should_start_update_refs()) {
      heap->vmop_entry_init_updaterefs();
      heap->entry_updaterefs();
      if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

      heap->vmop_entry_final_updaterefs();

      // Update-refs has freed up the collection set, kick the cleanup to reclaim the space.
      heap->entry_cleanup();

    } else {
      heap->vmop_entry_final_evac();
    }
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

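// Checks whether the current cycle was cancelled. On allocation failure, records
// the degeneration point so the control loop can continue from it in Degenerated GC.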
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

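// Full GC cycle: the entire collection runs in a single STW VM operation.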
void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

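// Degenerated GC cycle: finishes the cancelled concurrent cycle under STW,
// starting from the recorded degeneration point.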
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

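// Uncommits heap regions that have been empty and committed since before shrink_before.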
void ShenandoahControlThread::service_uncommit(double shrink_before) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking the heap lock if there
  // is no work available, avoids spamming logs with superfluous messages, and
  // minimizes the amount of work done while the lock is held.

  if (heap->committed() <= heap->min_capacity()) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

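// Entry point for requested GCs. Explicit requests are dropped when
// -XX:+DisableExplicitGC is set; otherwise the request is handed to the
// control loop and the caller blocks until the cycle completes.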
void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

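// Posts the GC request for the control loop to pick up, then blocks on the
// waiters lock until notify_gc_waiters() signals that the cycle is done.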
void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  _requested_gc_cause = cause;
  _gc_requested.set();
  MonitorLockerEx ml(&_gc_waiters_lock);
  while (_gc_requested.is_set()) {
    ml.wait();
  }
}

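// Called by a Java thread that failed to allocate: schedules the allocation
// failure GC, cancels the current cycle, and blocks until the GC is complete.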
void ShenandoahControlThread::handle_alloc_failure(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

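// Called on allocation failure during evacuation. Does not block the caller;
// only cancels the cycle so the control loop can handle the failure.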
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLockerEx ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

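// Called from the allocation path when pacing is enabled: accumulates the
// allocated words for the control loop to report to the pacer.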
void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}