/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

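// Periodic task that samples the monitoring counters: it performs a forced update
// when one is pending, and a regular update when the allocation path requested one.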
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

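// Periodic task that forces all mutator threads to flush their SATB buffers, so that
// entries sitting in partially filled thread-local buffers become visible to the
// concurrent marker.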
void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

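// Main loop of the control thread: pick the GC mode for this iteration (none,
// concurrent, degenerated or full), run the selected cycle, and handle the periodic
// chores (uncommit, counter updates, pacing, sleeping).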
void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling the regions for shrinking.
  // Having a period 10x shorter than the delay means we detect regions eligible
  // for shrinking with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() &&  is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen);

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        if (heuristics->can_do_traversal_gc()) {
          mode = concurrent_traversal;
        } else {
          mode = concurrent_normal;
        }
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        if (heuristics->can_do_traversal_gc()) {
          mode = concurrent_traversal;
        } else {
          mode = concurrent_normal;
        }

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask the heuristics if they want to act
      if (heuristics->should_start_traversal_gc()) {
        mode = concurrent_traversal;
        cause = GCCause::_shenandoah_traversal_gc;
      } else if (heuristics->should_start_normal_gc()) {
        mode = concurrent_normal;
        cause = GCCause::_shenandoah_concurrent_gc;
      }

      // Ask heuristics if this cycle should process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear all soft references on this cycle if we are handling an allocation
    // failure, or if we are requested to do so unconditionally.
    if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If a GC was requested, sample the counters even without actual triggers
      // from the allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If a GC was requested, dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }
    }

    switch (mode) {
      case none:
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    if (gc_requested) {
      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it completed normally or was aborted.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify the Universe about the new heap usage. This has implications for
        // the global soft refs policy, and we should report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report the allocations seen during this control loop iteration to the pacer
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
      // Regular paths uncommit only occasionally.
      double shrink_before = explicit_gc_requested ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);
      service_uncommit(shrink_before);
      last_shrink_time = current;
    }

    // Wait before performing the next action. If an allocation happened during this
    // wait, we exit sooner, to let the heuristics re-evaluate the new conditions.
    // If we are idle, back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(); we cannot leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

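// Traversal GC cycle: marking, evacuation and reference updates are combined into a
// single concurrent traversal, bracketed by the init/final traversal pauses.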
void ShenandoahControlThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming cycle
  heap->entry_reset();

  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup();

  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to Degenerated
  // GC and completes there. If a second allocation failure happens during the Degenerated
  // GC cycle (for example, when GC tries to evacuate something and no memory is available),
  // the cycle degrades to Full GC.
  //
  // There are also two shortcuts through the normal cycle: a) the immediate garbage shortcut,
  // when the heuristics say there are no regions to compact and all the collected garbage
  // comes from immediately reclaimable regions; b) the coalesced UR shortcut, when the
  // heuristics decide to coalesce update-refs (UR) with the mark of the next cycle.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                       (coalesced UR)      v
  //                             |                  /----------------------->o
  //                             |                  |                        |
  //                             |                  |                        v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Evacuate concurrent roots
  heap->entry_concurrent_roots();

  // Final mark might have reclaimed some immediate garbage; kick off cleanup to
  // reclaim the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would have been unset by the collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform the update-refs phase, if required. This phase can be skipped if the
    // heuristics decide to piggy-back the update-refs on the next marking cycle. On
    // either path, we need to turn off evacuation: either in init-update-refs, or in final-evac.
    if (heap->heuristics()->should_start_update_refs()) {
      heap->vmop_entry_init_updaterefs();
      heap->entry_updaterefs();
      if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

      heap->vmop_entry_final_updaterefs();

      // Updating references has freed up the collection set; kick off cleanup to reclaim the space.
      heap->entry_cleanup();

    } else {
      heap->vmop_entry_final_evac();
    }
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

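// Returns true if the cycle was cancelled. Unless we are shutting down gracefully,
// a cancellation means an allocation failure; remember the point at which the cycle
// was interrupted, so the next control loop iteration can run a Degenerated GC from there.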
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

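// Full GC cycle: the entire collection runs inside a single stop-the-world VM operation.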
void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

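// Degenerated GC cycle: finish the interrupted concurrent cycle under stop-the-world,
// continuing from the phase at which it was cancelled (the degeneration point).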
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

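// Uncommit heap regions that have been empty since before the shrink_before deadline,
// i.e. for longer than ShenandoahUncommitDelay.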
void ShenandoahControlThread::service_uncommit(double shrink_before) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking the heap lock if there is
  // no work available, avoids spamming the log with superfluous messages, and
  // minimises the amount of work done while the lock is held.

  if (heap->committed() <= heap->min_capacity()) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion *r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before);
  }
}

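// Explicit GCs are those requested by the user (System.gc()) or by serviceability tools.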
bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

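// Post the GC request for the control loop to pick up, and block the requesting thread
// until the cycle that serves the request completes (see notify_gc_waiters()).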
void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  _requested_gc_cause = cause;
  _gc_requested.set();
  MonitorLocker ml(&_gc_waiters_lock);
  while (_gc_requested.is_set()) {
    ml.wait();
  }
}

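// Called by a Java thread that failed to allocate: schedule an allocation failure GC,
// cancel any GC in progress, and block until the allocation failure GC completes.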
void ShenandoahControlThread::handle_alloc_failure(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

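// Called when evacuation fails to allocate in to-space. Cancels the current cycle so the
// control loop can take the degenerated or full path, but does not block the calling thread.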
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

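// Accumulate the allocated words for the pacer; the sum is consumed (exchanged to zero)
// once per control loop iteration.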
void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}