/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/vmThread.hpp"

ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _explicit_gc_waiters_lock(Mutex::leaf, "ShenandoahExplicitGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _explicit_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0)
{
  create_and_start();
  _periodic_task.enroll();
}
ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
  // This is here so that the superclass destructor is called.
}

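// Periodic task body: if a counters update has been requested by the allocation
// path, or forced updates are enabled (around GC cycles), push the current heap
// state into the monitoring counters.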
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

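// Main loop of the control thread: decide whether a GC cycle is needed and in which
// mode (concurrent, degenerated, or full STW), run it, and sleep until the next
// decision point.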
void ShenandoahConcurrentThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Keeping the period 10x shorter than the delay means we react to shrinking
  // opportunities with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoahPolicy();

  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _explicit_gc.is_set();

    // The amount of allocation this control loop iteration has seen.
    size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen);

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && policy->should_degenerate_cycle()) {
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      // Honor explicit GC requests
      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        if (policy->can_do_traversal_gc()) {
          mode = concurrent_traversal;
        } else {
          mode = concurrent_normal;
        }
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
      cause = _explicit_gc_cause;
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (policy->should_start_partial_gc()) {
        mode = concurrent_partial;
        cause = GCCause::_shenandoah_partial_gc;
      } else if (policy->should_start_traversal_gc()) {
        mode = concurrent_traversal;
        cause = GCCause::_shenandoah_traversal_gc;
      } else if (policy->should_start_normal_gc()) {
        mode = concurrent_normal;
        cause = GCCause::_shenandoah_concurrent_gc;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(policy->should_process_references());
      heap->set_unload_classes(policy->should_unload_classes());
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If GC was requested, sample the counters even without actual triggers
      // from the allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, also dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status_verbose();
      }
    }

    switch (mode) {
      case none:
        break;
      case concurrent_partial:
        service_concurrent_partial_cycle(cause);
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    if (gc_requested) {
      heap->set_used_at_last_gc();

      // If this was the explicit GC cycle, notify waiters about it
      if (explicit_gc_requested) {
        notify_explicit_gc_waiters();

        // Explicit GC tries to uncommit everything
        heap->handle_heap_shrinkage(os::elapsedTime());
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report the current free set state at the end of the cycle, whether
      // it was a normal completion or an abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status_verbose();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report to the pacer how much allocation we have seen.
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    // Try to uncommit stale regions
    if (current - last_shrink_time > shrink_period) {
      heap->handle_heap_shrinkage(current - (ShenandoahUncommitDelay / 1000.0));
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

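// Partial GC cycle: init-partial pause, concurrent partial collection, final-partial
// pause, then concurrent cleanup. Bails out early if the cycle is cancelled or degenerates.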
void ShenandoahConcurrentThread::service_concurrent_partial_cycle(GCCause::Cause cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahPartialGC* partial_gc = heap->partial_gc();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  TraceCollectorStats tcs(heap->monitoring_support()->partial_collection_counters());

  heap->vmop_entry_init_partial();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_partial)) return;

  if (!partial_gc->has_work()) return;

  heap->entry_partial();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_partial)) return;

  heap->vmop_entry_final_partial();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_partial)) return;

  heap->entry_cleanup();

  heap->shenandoahPolicy()->record_success_partial();
}

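// Traversal GC cycle: init-traversal pause, concurrent traversal, final-traversal
// pause, then concurrent bitmap cleanup.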
void ShenandoahConcurrentThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup_bitmaps();

  heap->shenandoahPolicy()->record_success_concurrent();
}

void ShenandoahConcurrentThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // A normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degenerates to a
  // Degenerated GC and completes the collection there. If a second allocation failure
  // happens during the Degenerated GC cycle (for example, when GC tries to evacuate
  // something and no memory is available), the cycle degrades to a Full GC.
  //
  // The only current exception is an allocation failure in Conc Evac: it goes straight
  // to Full GC, because we do not recover well from an incompletely evacuated heap in
  // the STW cycle.
  //
  // There are also two shortcuts through the normal cycle: a) the immediate garbage
  // shortcut, when heuristics say there are no regions to compact, and all the collection
  // comes from immediately reclaimable regions; b) the coalesced UR shortcut, when
  // heuristics decide to coalesce the update-refs (UR) phase with the mark of the next cycle.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                       (coalesced UR)      v
  //                             |                  /----------------------->o
  //                             |                  |                        |
  //                             |                  |                        v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |          /---------/                 |              |      Degenerated GC
  //                   v          |                           v              |
  //               STW Mark ------+---> STW Evac ----> STW Update-Refs ----->o
  //                   |          |         |                 |              ^
  //                   | (af)     |         | (af)            | (af)         |
  // ..................|..........|.........|.................|..............|.......................
  //                   |          |         |                 |              |
  //                   |          v         v                 |              |      Full GC
  //                   \--------->o-------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  // Capture peak occupancy right after starting the cycle
  heap->shenandoahPolicy()->record_peak_occupancy();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Final mark had reclaimed some immediate garbage, kick cleanup to reclaim the space
    // for the rest of the cycle.
    heap->entry_cleanup();

    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase, if required. This phase can be skipped if heuristics
    // decides to piggy-back the update-refs on the next marking cycle. On either path,
    // we need to turn off evacuation: either in init-update-refs, or in final-evac.
    if (heap->shenandoahPolicy()->should_start_update_refs()) {
      heap->vmop_entry_init_updaterefs();
      heap->entry_updaterefs();
      if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

      heap->vmop_entry_final_updaterefs();
    } else {
      heap->vmop_entry_final_evac();
    }
  }

  // Reclaim space and prepare for the next normal cycle:
  heap->entry_cleanup_bitmaps();

  // Cycle is complete
  heap->shenandoahPolicy()->record_success_concurrent();
}

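// Check whether the concurrent cycle was cancelled. If we are not in a graceful
// shutdown, record the point at which the cycle degenerated so the control loop
// can continue with a Degenerated GC from there.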
bool ShenandoahConcurrentThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_concgc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahConcurrentThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahConcurrentThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->shenandoahPolicy()->record_success_full();
}

void ShenandoahConcurrentThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->shenandoahPolicy()->record_success_degenerated();
}

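// Handle a user- or serviceability-requested GC: arm the explicit GC request and
// block the requesting thread until the control thread completes the cycle and
// notifies the waiters.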
void ShenandoahConcurrentThread::handle_explicit_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) || GCCause::is_serviceability_requested_gc(cause),
         "only requested GCs here");
  if (!DisableExplicitGC) {
    _explicit_gc_cause = cause;

    _explicit_gc.set();
    MonitorLockerEx ml(&_explicit_gc_waiters_lock);
    while (_explicit_gc.is_set()) {
      ml.wait();
    }
  }
}

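// Handle an allocation failure from a Java thread: schedule an allocation failure GC,
// cancel the concurrent cycle, and block the allocating thread until the GC completes.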
void ShenandoahConcurrentThread::handle_alloc_failure(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "K", words * HeapWordSize / K);

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_concgc(GCCause::_allocation_failure);
  }

  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
  assert(!is_alloc_failure_gc(), "expect alloc failure GC to have completed");
}

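// Handle an allocation failure during evacuation: report it once and cancel the
// concurrent cycle immediately. Unlike the regular allocation failure path, the
// failing thread does not wait here.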
void ShenandoahConcurrentThread::handle_alloc_failure_evac(size_t words) {
  log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule GC by thread %d",
                        Thread::current()->osthread()->thread_id());

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "K for evacuation", words * HeapWordSize / K);
  }

  // Forcefully report allocation failure
  heap->cancel_concgc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahConcurrentThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahConcurrentThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahConcurrentThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahConcurrentThread::notify_explicit_gc_waiters() {
  _explicit_gc.unset();
  MonitorLockerEx ml(&_explicit_gc_waiters_lock);
  ml.notify_all();
}

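// Update the monitoring counters if the allocation path has requested an update.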
void ShenandoahConcurrentThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

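// Update the monitoring counters unconditionally while forced updates are enabled,
// i.e. around GC cycles, regardless of allocation activity.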
void ShenandoahConcurrentThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahConcurrentThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we take a new region. This amortizes the
  // update costs on the slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

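// Accumulate words allocated under pacing; the control loop consumes this counter
// and reports it to the pacer once per iteration.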
void ShenandoahConcurrentThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahConcurrentThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahConcurrentThread::print() const {
  print_on(tty);
}

void ShenandoahConcurrentThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahConcurrentThread::start() {
  create_and_start();
}

void ShenandoahConcurrentThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahConcurrentThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}