/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
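  // Run at critical priority when requested, so that GC decision making is not
  // starved by heavily loaded application threads; otherwise run near max priority.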
  create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}

void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we hit the
  // shrinking with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
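  // For example, ShenandoahUncommitDelay = 300000 (5 minutes) yields
  // shrink_period = 30 seconds between uncommit polls.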

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() &&  is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0);
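    // Note: Atomic::xchg returns the previous value while storing zero, so
    // allocations reported concurrently by pacing_notify_alloc() are counted
    // exactly once across iterations.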

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask heuristics if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Clear all soft references this cycle, if we are handling an allocation
    // failure, or if we are requested to do so unconditionally.
    if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      // Use default constructor to snapshot the Metaspace state before GC.
      metaspace::MetaspaceSizesSnapshot meta_sizes;

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to the pacer how much allocation we have seen this iteration
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
      // Regular paths uncommit only occasionally.
      double shrink_before = explicit_gc_requested ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);
      service_uncommit(shrink_before);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
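    // For example, starting from ShenandoahControlIntervalMin, the sleep doubles
    // after each quiet adjustment period until it hits ShenandoahControlIntervalMax.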
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to Degenerated
  // GC and completes there. If a second allocation failure happens during the Degenerated
  // GC cycle (for example, when GC tries to evacuate something and no memory is available),
  // the cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, taken
  // when heuristics say there are no regions to compact, and all the collection comes from
  // immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    heap->entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->is_concurrent_weak_root_in_progress()) {
    heap->entry_class_unloading();
  }

  // Process strong roots.
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    heap->entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase.
    heap->vmop_entry_init_updaterefs();
    heap->entry_updaterefs();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

    heap->vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set; kick cleanup to reclaim the space.
    heap->entry_cleanup_complete();
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

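// Check whether the cycle was cancelled. Outside of a graceful shutdown, cancellation
// implies an allocation failure; remember the point at which the concurrent cycle was
// interrupted, so the Degenerated GC knows where to pick up the work.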
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahControlThread::service_uncommit(double shrink_before) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= heap->min_capacity()) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
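    // Candidate regions are committed but empty, and became empty before the
    // cut-off time, i.e. have been stale for longer than ShenandoahUncommitDelay.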
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before);
  }
}

bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in an already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.
  size_t required_gc_id = get_gc_id() + 1;
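  // The GC id is bumped when a cycle starts, and waiters are notified when the
  // cycle completes; leaving this loop therefore means at least one full cycle
  // has run after this request was made.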

  MonitorLocker ml(&_gc_waiters_lock);
  while (get_gc_id() < required_gc_id) {
    _gc_requested.set();
    _requested_gc_cause = cause;
    ml.wait();
  }
}

void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

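// Unlike handle_alloc_failure(), this path does not block the caller: evacuation
// failures are recovered by cancelling the concurrent cycle and degenerating.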
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}