/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/vmThread.hpp"

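// ShenandoahConcurrentThread is the GC control thread: it decides when to
// start GC cycles, runs the concurrent phases itself, and schedules the
// stop-the-world pauses on the VMThread. Requests from Java threads arrive
// through the flag setters below (do_full_gc(), do_conc_gc(),
// trigger_counters_update()).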
ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _full_gc_lock(Mutex::leaf, "ShenandoahFullGC_lock", true, Monitor::_safepoint_check_always),
  _conc_gc_lock(Mutex::leaf, "ShenandoahConcGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _do_full_gc(0),
  _do_concurrent_gc(0),
  _do_counters_update(0),
  _full_gc_cause(GCCause::_no_cause_specified),
  _graceful_shutdown(0)
{
  create_and_start();
  _periodic_task.enroll();
}

ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
  // Defined explicitly so that the superclass destructor is invoked.
}

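// Periodic task (run off the WatcherThread) that polls the counters-update
// request flag; see do_counters_update() and trigger_counters_update() below.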
void ShenandoahPeriodicTask::task() {
  _thread->do_counters_update();
}

void ShenandoahConcurrentThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  double last_shrink_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we react to expired
  // regions with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, while shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
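  // For example (illustrative value, not necessarily the default):
  // ShenandoahUncommitDelay = 1000 ms gives shrink_period = 1000/1000/10 = 0.1 s,
  // so regions are considered for uncommit at most every 0.1 s, i.e. about
  // ten times within one delay interval.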

  while (!in_graceful_shutdown() && !should_terminate()) {
    bool partial_gc_requested = heap->shenandoahPolicy()->should_start_partial_gc();
    bool conc_gc_requested = is_conc_gc_requested() || heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity());
    bool full_gc_requested = is_full_gc();
    bool gc_requested = partial_gc_requested || conc_gc_requested || full_gc_requested;

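    // Full GC takes precedence over a partial cycle, which in turn takes
    // precedence over a normal concurrent cycle.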
    if (full_gc_requested) {
      service_fullgc_cycle();
    } else if (partial_gc_requested) {
      service_partial_cycle();
    } else if (conc_gc_requested) {
      service_normal_cycle();
    }

    if (gc_requested) {
      // Update counters when GC was requested
      do_counters_update();

      // Coming out of (cancelled) concurrent GC, reset these for sanity
      if (heap->is_evacuation_in_progress()) {
        heap->set_evacuation_in_progress_concurrently(false);
      }

      if (heap->is_update_refs_in_progress()) {
        heap->set_update_refs_in_progress(false);
      }

      reset_conc_gc_requested();
    } else {
      Thread::current()->_ParkEvent->park(10);
    }

    // Try to uncommit stale regions
    double current = os::elapsedTime();
    if (current - last_shrink_time > shrink_period) {
      heap->handle_heap_shrinkage();
      last_shrink_time = current;
    }

    // Make sure the _do_full_gc flag changes are seen.
    OrderAccess::storeload();
  }

  // Wait for the actual stop() call; we cannot leave run_service() earlier.
  while (!should_terminate()) {
    Thread::current()->_ParkEvent->park(10);
  }
}

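// A partial cycle: init-partial pause, concurrent partial collection,
// final-partial pause, then concurrent cleanup of reclaimed regions.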
void ShenandoahConcurrentThread::service_partial_cycle() {
  GCIdMark gc_id_mark;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->partial_collection_counters());

  {
    ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
    ShenandoahGCPhase partial_phase(ShenandoahPhaseTimings::init_partial_gc_gross);
    VM_ShenandoahInitPartialGC init_partial_gc;
    VMThread::execute(&init_partial_gc);
  }

  {
    GCTraceTime(Info, gc) time("Concurrent partial GC", heap->gc_timer(), GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    ShenandoahHeap::heap()->partial_gc()->concurrent_partial_collection();
  }

  {
    ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
    ShenandoahGCPhase partial_phase(ShenandoahPhaseTimings::final_partial_gc_gross);
    VM_ShenandoahFinalPartialGC final_partial_gc;
    VMThread::execute(&final_partial_gc);
  }

  {
    GCTraceTime(Info, gc) time("Concurrent cleanup", heap->gc_timer(), GCCause::_no_gc, true);
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
    ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
    heap->recycle_trash();
  }

  // TODO: Call this properly with Shenandoah*CycleMark
  heap->set_used_at_last_gc();
}

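// A normal cycle: init-mark pause, concurrent mark, final-mark pause (which
// also starts evacuation), concurrent cleanup, concurrent evacuation,
// init-update-refs pause, concurrent update-refs, final-update-refs pause,
// and a final concurrent cleanup that recycles trash and resets mark bitmaps.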
void ShenandoahConcurrentThread::service_normal_cycle() {
  if (check_cancellation()) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCTimer* gc_timer = heap->gc_timer();

  ShenandoahGCSession session;

  // Cycle started
  heap->shenandoahPolicy()->record_cycle_start();

  // Capture peak occupancy right after starting the cycle
  heap->shenandoahPolicy()->record_peak_occupancy();

  GCIdMark gc_id_mark;
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);

  // Start initial mark under STW:
  {
    // Workers are set up by VM_ShenandoahInitMark
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
    ShenandoahGCPhase init_mark_phase(ShenandoahPhaseTimings::init_mark_gross);
    VM_ShenandoahInitMark initMark;
    VMThread::execute(&initMark);
  }

  if (check_cancellation()) return;

  // Continue concurrent mark:
  {
    // Set up workers for the concurrent marking phase
    WorkGang* workers = heap->workers();
    uint n_workers = ShenandoahWorkerPolicy::calc_workers_for_conc_marking();
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime(Info, gc) time("Concurrent marking", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
  }

  // Allocations happen during concurrent mark, record peak after the phase:
  heap->shenandoahPolicy()->record_peak_occupancy();

  // Possibly hand over remaining marking work to the final-mark pause.
  bool clear_full_gc = false;
  if (heap->cancelled_concgc()) {
    heap->shenandoahPolicy()->record_cm_cancelled();
    if (_full_gc_cause == GCCause::_allocation_failure &&
        heap->shenandoahPolicy()->handover_cancelled_marking()) {
      heap->clear_cancelled_concgc();
      clear_full_gc = true;
      heap->shenandoahPolicy()->record_cm_degenerated();
    } else {
      return;
    }
  } else {
    heap->shenandoahPolicy()->record_cm_success();

    // If not cancelled, we can try to concurrently pre-clean weak references
    if (ShenandoahPreclean) {
      if (heap->concurrentMark()->process_references()) {
        GCTraceTime(Info, gc) time("Concurrent precleaning", gc_timer, GCCause::_no_gc, true);
        ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
        heap->concurrentMark()->preclean_weak_refs();

        // Allocations happen during concurrent preclean, record peak after the phase:
        heap->shenandoahPolicy()->record_peak_occupancy();
      }
    }
  }

  // Proceed to complete marking under STW, and start evacuation:
  {
    // Workers are set up by VM_ShenandoahFinalMarkStartEvac
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
    ShenandoahGCPhase final_mark_phase(ShenandoahPhaseTimings::final_mark_gross);
    VM_ShenandoahFinalMarkStartEvac finishMark;
    VMThread::execute(&finishMark);
  }

  if (check_cancellation()) return;

  // If we handed off remaining marking work above, wake up Java threads
  // waiting for the full GC to complete
  if (clear_full_gc) {
    reset_full_gc();
  }

  // Final mark may have reclaimed some immediate garbage; run concurrent
  // cleanup to reclaim the space.
  {
    GCTraceTime(Info, gc) time("Concurrent cleanup", gc_timer, GCCause::_no_gc, true);
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
    ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
    heap->recycle_trash();
  }

  // Perform concurrent evacuation, if required.
  // This phase can be skipped if there is nothing to evacuate. If so, evac_in_progress
  // would be unset by the collection set preparation code.
  if (heap->is_evacuation_in_progress()) {

    // Set up workers for the concurrent evacuation phase
    WorkGang* workers = heap->workers();
    uint n_workers = ShenandoahWorkerPolicy::calc_workers_for_conc_evac();
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime(Info, gc) time("Concurrent evacuation", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    heap->do_evacuation();

    // Allocations happen during evacuation, record peak after the phase:
    heap->shenandoahPolicy()->record_peak_occupancy();

    if (check_cancellation()) return;
  }

  // Perform the update-refs phase, if required.
  // This phase can be skipped if there was nothing evacuated. If so, need_update_refs
  // would be unset by the collection set preparation code. However, adaptive heuristics
  // need to record "success" when this phase is skipped. Therefore, we conditionally
  // execute all ops, leaving heuristics adjustments intact.
  if (heap->shenandoahPolicy()->should_start_update_refs()) {

    bool do_it = heap->need_update_refs();
    if (do_it) {
      {
        TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
        ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
        ShenandoahGCPhase init_update_refs_phase(ShenandoahPhaseTimings::init_update_refs_gross);
        VM_ShenandoahInitUpdateRefs init_update_refs;
        VMThread::execute(&init_update_refs);
      }

      {
        GCTraceTime(Info, gc) time("Concurrent update references", gc_timer, GCCause::_no_gc, true);
        WorkGang* workers = heap->workers();
        uint n_workers = ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref();
        ShenandoahWorkerScope scope(workers, n_workers);
        heap->concurrent_update_heap_references();
      }
    }

    // Allocations happen during update-refs, record peak after the phase:
    heap->shenandoahPolicy()->record_peak_occupancy();

    clear_full_gc = false;
    if (heap->cancelled_concgc()) {
      heap->shenandoahPolicy()->record_uprefs_cancelled();
      if (_full_gc_cause == GCCause::_allocation_failure &&
          heap->shenandoahPolicy()->handover_cancelled_uprefs()) {
        clear_full_gc = true;
        heap->shenandoahPolicy()->record_uprefs_degenerated();
      } else {
        return;
      }
    } else {
      heap->shenandoahPolicy()->record_uprefs_success();
    }

    if (do_it) {
      TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
      ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
      ShenandoahGCPhase final_update_refs_phase(ShenandoahPhaseTimings::final_update_refs_gross);
      VM_ShenandoahFinalUpdateRefs final_update_refs;
      VMThread::execute(&final_update_refs);
    }
  } else {
    // If the update-refs phase was skipped, we need another verification pass after evacuation.
    if (ShenandoahVerify && !check_cancellation()) {
      VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
      VMThread::execute(&verify_after_evacuation);
    }
  }

  // Prepare for the next normal cycle:
  if (check_cancellation()) return;

  if (clear_full_gc) {
    reset_full_gc();
  }

  {
    GCTraceTime(Info, gc) time("Concurrent cleanup", gc_timer, GCCause::_no_gc, true);
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);

    {
      ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
      heap->recycle_trash();
    }

    {
      ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
      WorkGang* workers = heap->workers();
      ShenandoahPushWorkerScope scope(workers, ConcGCThreads);
      heap->reset_next_mark_bitmap(workers);
    }
  }

  // Allocations happen during bitmap cleanup, record peak after the phase:
  heap->shenandoahPolicy()->record_peak_occupancy();

  // Cycle is complete
  heap->shenandoahPolicy()->record_cycle_end();

  // TODO: Call this properly with Shenandoah*CycleMark
  heap->set_used_at_last_gc();
}

bool ShenandoahConcurrentThread::check_cancellation() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_concgc()) {
    assert(is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
    return true;
  }
  return false;
}

void ShenandoahConcurrentThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahConcurrentThread::service_fullgc_cycle() {
  GCIdMark gc_id_mark;
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    if (_full_gc_cause == GCCause::_allocation_failure) {
      heap->shenandoahPolicy()->record_allocation_failure_gc();
    } else {
      heap->shenandoahPolicy()->record_user_requested_gc();
    }

    TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
    TraceMemoryManagerStats tmms(true, _full_gc_cause);
    VM_ShenandoahFullGC full_gc(_full_gc_cause);
    VMThread::execute(&full_gc);
  }

  reset_full_gc();
}

void ShenandoahConcurrentThread::do_full_gc(GCCause::Cause cause) {
  assert(Thread::current()->is_Java_thread(), "expect Java thread here");

  if (try_set_full_gc()) {
    _full_gc_cause = cause;

    // Now that full GC is scheduled, we can abort everything else
    ShenandoahHeap::heap()->cancel_concgc(cause);
  } else {
    GCCause::Cause last_cause = _full_gc_cause;
    if (last_cause != cause) {
      switch (cause) {
        // These GC causes take precedence:
        case GCCause::_allocation_failure:
          log_info(gc)("Full GC was already pending with cause: %s; new cause is %s, overwriting",
                       GCCause::to_string(last_cause),
                       GCCause::to_string(cause));
          _full_gc_cause = cause;
          break;
        // Other GC causes can be ignored
        default:
          log_info(gc)("Full GC is already pending with cause: %s; new cause was %s, ignoring",
                       GCCause::to_string(last_cause),
                       GCCause::to_string(cause));
          break;
      }
    }
  }

  MonitorLockerEx ml(&_full_gc_lock);
  while (is_full_gc()) {
    ml.wait();
  }
  assert(!is_full_gc(), "expect full GC to have completed");
}
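// Hypothetical caller-side sketch (the way the control thread is obtained is
// illustrative, not defined in this file): a Java thread that failed to
// allocate requests a full GC and blocks in do_full_gc() until the cycle
// completes and reset_full_gc() notifies _full_gc_lock:
//
//   ShenandoahConcurrentThread* ct = ...; // e.g. stored on the heap object
//   ct->do_full_gc(GCCause::_allocation_failure);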

void ShenandoahConcurrentThread::reset_full_gc() {
  OrderAccess::release_store_fence(&_do_full_gc, 0);
  MonitorLockerEx ml(&_full_gc_lock);
  ml.notify_all();
}

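// Atomically flip _do_full_gc from 0 to 1. Only the single caller that makes
// the transition sees true; that caller gets to set _full_gc_cause and cancel
// the concurrent GC.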
bool ShenandoahConcurrentThread::try_set_full_gc() {
  jbyte old = Atomic::cmpxchg((jbyte)1, &_do_full_gc, (jbyte)0);
  return old == 0; // success
}

bool ShenandoahConcurrentThread::is_full_gc() {
  return OrderAccess::load_acquire(&_do_full_gc) == 1;
}

bool ShenandoahConcurrentThread::is_conc_gc_requested() {
  return OrderAccess::load_acquire(&_do_concurrent_gc) == 1;
}

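// Request a concurrent GC cycle and block until it completes. The control
// thread observes the flag in run_service(); reset_conc_gc_requested() clears
// it and notifies waiters once the cycle is done.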
void ShenandoahConcurrentThread::do_conc_gc() {
  OrderAccess::release_store_fence(&_do_concurrent_gc, 1);
  MonitorLockerEx ml(&_conc_gc_lock);
  // Loop to protect against spurious wakeups: wait until the request has
  // been acknowledged, mirroring the waiting protocol in do_full_gc().
  while (is_conc_gc_requested()) {
    ml.wait();
  }
}

void ShenandoahConcurrentThread::reset_conc_gc_requested() {
  OrderAccess::release_store_fence(&_do_concurrent_gc, 0);
  MonitorLockerEx ml(&_conc_gc_lock);
  ml.notify_all();
}

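// Counters-update handshake: trigger_counters_update() raises a flag from any
// thread; the update itself happens when do_counters_update() next observes
// the flag, either from the periodic task or right after a GC cycle. Losing a
// concurrent trigger is benign: counters get refreshed on the next one.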
void ShenandoahConcurrentThread::do_counters_update() {
  if (OrderAccess::load_acquire(&_do_counters_update) == 1) {
    OrderAccess::release_store(&_do_counters_update, 0);
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahConcurrentThread::trigger_counters_update() {
  if (OrderAccess::load_acquire(&_do_counters_update) == 0) {
    OrderAccess::release_store(&_do_counters_update, 1);
  }
}

void ShenandoahConcurrentThread::print() const {
  print_on(tty);
}

void ShenandoahConcurrentThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahConcurrentThread::start() {
  create_and_start();
}

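// Graceful shutdown: setting the flag makes run_service() stop starting new
// GC cycles; the thread then parks until should_terminate() becomes true via
// the usual ConcurrentGCThread stop protocol.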
void ShenandoahConcurrentThread::prepare_for_graceful_shutdown() {
  OrderAccess::release_store_fence(&_graceful_shutdown, 1);
}

bool ShenandoahConcurrentThread::in_graceful_shutdown() {
  return OrderAccess::load_acquire(&_graceful_shutdown) == 1;
}