/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/vmThread.hpp"

ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _full_gc_lock(Mutex::leaf, "ShenandoahFullGC_lock", true, Monitor::_safepoint_check_always),
  _conc_gc_lock(Mutex::leaf, "ShenandoahConcGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _do_full_gc(0),
  _do_concurrent_gc(0),
  _do_counters_update(0),
  _full_gc_cause(GCCause::_no_cause_specified),
  _graceful_shutdown(0)
{
  create_and_start();
  _periodic_task.enroll();
}

ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
  // This is here so that super is called.
}

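// Periodic task hook: forwards to the concurrent thread, which publishes
// a monitoring counters update only if one was requested via
// trigger_counters_update().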
void ShenandoahPeriodicTask::task() {
  _thread->do_counters_update();
}

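// Main service loop: polls the heuristics for partial/concurrent/full GC
// requests, runs the matching cycle, and periodically tries to uncommit
// unused heap regions. Runs until graceful shutdown or termination.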
void ShenandoahConcurrentThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  double last_shrink_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we detect the need
  // to shrink with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  while (!in_graceful_shutdown() && !should_terminate()) {
    bool partial_gc_requested = heap->shenandoahPolicy()->should_start_partial_gc();
    bool conc_gc_requested = is_conc_gc_requested() || heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity());
    bool full_gc_requested = is_full_gc();
    bool gc_requested = partial_gc_requested || conc_gc_requested || full_gc_requested;

    if (full_gc_requested) {
      service_fullgc_cycle();
    } else if (partial_gc_requested) {
      service_partial_cycle();
    } else if (conc_gc_requested) {
      service_normal_cycle();
    }

    if (gc_requested) {
      // Update counters when GC was requested
      do_counters_update();

      // Coming out of (cancelled) concurrent GC, reset these for sanity
      if (heap->is_evacuation_in_progress()) {
        heap->set_evacuation_in_progress_concurrently(false);
      }

      if (heap->is_update_refs_in_progress()) {
        heap->set_update_refs_in_progress(false);
      }

      reset_conc_gc_requested();
    } else {
      Thread::current()->_ParkEvent->park(10);
    }

    // Try to uncommit stale regions
    double current = os::elapsedTime();
    if (current - last_shrink_time > shrink_period) {
      heap->handle_heap_shrinkage();
      last_shrink_time = current;
    }

    // Make sure the _do_full_gc flag changes are seen.
    OrderAccess::storeload();
  }

  // Wait for the actual stop(); we cannot leave run_service() earlier.
  while (!should_terminate()) {
    Thread::current()->_ParkEvent->park(10);
  }
}

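// Partial GC cycle: init-partial pause, concurrent partial collection,
// final-partial pause, and a concurrent cleanup that recycles trashed regions.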
void ShenandoahConcurrentThread::service_partial_cycle() {
  GCIdMark gc_id_mark;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->partial_collection_counters());

  {
    ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
    ShenandoahGCPhase partial_phase(ShenandoahPhaseTimings::init_partial_gc_gross);
    VM_ShenandoahInitPartialGC init_partial_gc;
    VMThread::execute(&init_partial_gc);
  }

  {
    GCTraceTime(Info, gc) time("Concurrent partial GC", heap->gc_timer(), GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    heap->partial_gc()->concurrent_partial_collection();
  }

  {
    ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
    ShenandoahGCPhase partial_phase(ShenandoahPhaseTimings::final_partial_gc_gross);
    VM_ShenandoahFinalPartialGC final_partial_gc;
    VMThread::execute(&final_partial_gc);
  }

  {
    GCTraceTime(Info, gc) time("Concurrent cleanup", heap->gc_timer(), GCCause::_no_gc, true);
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
    ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
    heap->recycle_trash();
  }

  // TODO: Call this properly with Shenandoah*CycleMark
  heap->set_used_at_last_gc();
}

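// Normal cycle: concurrent bitmap reset, init-mark pause, concurrent marking
// (with optional precleaning), final-mark pause that starts evacuation,
// concurrent evacuation, and, if needed, the update-refs phases. The cycle
// bails out early whenever it gets cancelled, e.g. by a pending Full GC.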
void ShenandoahConcurrentThread::service_normal_cycle() {
  if (check_cancellation()) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCTimer* gc_timer = heap->gc_timer();

  ShenandoahGCSession session;

  // Cycle started
  heap->shenandoahPolicy()->record_cycle_start();

  // Capture peak occupancy right after starting the cycle
  heap->shenandoahPolicy()->record_peak_occupancy();

  GCIdMark gc_id_mark;
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);

  // Mark requires clean bitmaps. Clear them here, before diving into STW.
  // There is a potential race from this moment on to TAMS reset in init mark: the bitmaps
  // would be clear, but TAMS not yet updated.
  {
    GCTraceTime(Info, gc) time("Concurrent cleanup", gc_timer, GCCause::_no_gc, true);
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
    ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
    WorkGang* workers = heap->workers();
    ShenandoahPushWorkerScope scope(workers, ConcGCThreads);
    heap->reset_mark_bitmap(workers);
  }

  // Start initial mark under STW:
  {
    // Workers are set up by VM_ShenandoahInitMark
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
    ShenandoahGCPhase init_mark_phase(ShenandoahPhaseTimings::init_mark_gross);
    VM_ShenandoahInitMark initMark;
    VMThread::execute(&initMark);
  }

  if (check_cancellation()) return;

  // Continue concurrent mark:
  {
    // Set up workers for the concurrent marking phase
    WorkGang* workers = heap->workers();
    uint n_workers = ShenandoahWorkerPolicy::calc_workers_for_conc_marking();
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime(Info, gc) time("Concurrent marking", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    heap->concurrentMark()->mark_from_roots();
  }

  // Allocations happen during concurrent mark, record peak after the phase:
  heap->shenandoahPolicy()->record_peak_occupancy();

  // Possibly hand over remaining marking work to the final-mark phase.
  bool clear_full_gc = false;
  if (heap->cancelled_concgc()) {
    heap->shenandoahPolicy()->record_cm_cancelled();
    if (_full_gc_cause == GCCause::_allocation_failure &&
        heap->shenandoahPolicy()->handover_cancelled_marking()) {
      heap->clear_cancelled_concgc();
      clear_full_gc = true;
      heap->shenandoahPolicy()->record_cm_degenerated();
    } else {
      return;
    }
  } else {
    heap->shenandoahPolicy()->record_cm_success();

    // If not cancelled, we can try to pre-clean weak references concurrently
    if (ShenandoahPreclean) {
      if (heap->concurrentMark()->process_references()) {
        GCTraceTime(Info, gc) time("Concurrent precleaning", gc_timer, GCCause::_no_gc, true);
        ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
        heap->concurrentMark()->preclean_weak_refs();

        // Allocations happen during concurrent preclean, record peak after the phase:
        heap->shenandoahPolicy()->record_peak_occupancy();
      }
    }
  }

  // Proceed to complete marking under STW, and start evacuation:
  {
    // Workers are set up by VM_ShenandoahFinalMarkStartEvac
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
    ShenandoahGCPhase final_mark_phase(ShenandoahPhaseTimings::final_mark_gross);
    VM_ShenandoahFinalMarkStartEvac finishMark;
    VMThread::execute(&finishMark);
  }

  if (check_cancellation()) return;

  // If we handed over remaining marking work above, wake up the Java threads
  // waiting for the full GC
  if (clear_full_gc) {
    reset_full_gc();
  }

  // Final mark reclaimed some immediate garbage; kick off cleanup to reclaim the space.
  {
    GCTraceTime(Info, gc) time("Concurrent cleanup", gc_timer, GCCause::_no_gc, true);
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
    ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
    heap->recycle_trash();
  }

  // Perform concurrent evacuation, if required.
  // This phase can be skipped if there is nothing to evacuate. If so, evac_in_progress would be unset
  // by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {

    // Set up workers for the concurrent evacuation phase
    WorkGang* workers = heap->workers();
    uint n_workers = ShenandoahWorkerPolicy::calc_workers_for_conc_evac();
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime(Info, gc) time("Concurrent evacuation", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    heap->do_evacuation();

    // Allocations happen during evacuation, record peak after the phase:
    heap->shenandoahPolicy()->record_peak_occupancy();

    if (check_cancellation()) return;
  }

  // Perform the update-refs phase, if required.
  // This phase can be skipped if nothing was evacuated. If so, need_update_refs would be unset
  // by collection set preparation code. However, adaptive heuristics need to record "success" when
  // this phase is skipped. Therefore, we conditionally execute all ops, leaving heuristics adjustments
  // intact.
  if (heap->shenandoahPolicy()->should_start_update_refs()) {

    bool do_it = heap->need_update_refs();
    if (do_it) {
      {
        TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
        ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause_gross);
        ShenandoahGCPhase init_update_refs_phase(ShenandoahPhaseTimings::init_update_refs_gross);
        VM_ShenandoahInitUpdateRefs init_update_refs;
        VMThread::execute(&init_update_refs);
      }

      {
        GCTraceTime(Info, gc) time("Concurrent update references", gc_timer, GCCause::_no_gc, true);
        WorkGang* workers = heap->workers();
        uint n_workers = ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref();
        ShenandoahWorkerScope scope(workers, n_workers);
        heap->concurrent_update_heap_references();
      }
    }

    // Allocations happen during update-refs, record peak after the phase:
    heap->shenandoahPolicy()->record_peak_occupancy();

    clear_full_gc = false;
    if (heap->cancelled_concgc()) {
      heap->shenandoahPolicy()->record_uprefs_cancelled();
      if (_full_gc_cause == GCCause::_allocation_failure &&
          heap->shenandoahPolicy()->handover_cancelled_uprefs()) {
        clear_full_gc = true;
        heap->shenandoahPolicy()->record_uprefs_degenerated();
      } else {
        return;
      }
    } else {
      heap->shenandoahPolicy()->record_uprefs_success();
    }

    if (do_it) {
      TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
      ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
      ShenandoahGCPhase final_update_refs_phase(ShenandoahPhaseTimings::final_update_refs_gross);
      VM_ShenandoahFinalUpdateRefs final_update_refs;
      VMThread::execute(&final_update_refs);
    }

    if (do_it) {
      GCTraceTime(Info, gc) time("Concurrent cleanup", gc_timer, GCCause::_no_gc, true);
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
      ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
      heap->recycle_trash();
    }

    // Allocations happen during bitmap cleanup, record peak after the phase:
    heap->shenandoahPolicy()->record_peak_occupancy();

  } else {
    // If update-refs was skipped, we need another verification pass after evacuation.
    if (ShenandoahVerify && !check_cancellation()) {
      VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
      VMThread::execute(&verify_after_evacuation);
    }
  }

  // Prepare for the next normal cycle:
  if (check_cancellation()) return;

  if (clear_full_gc) {
    reset_full_gc();
  }

  // Cycle is complete
  heap->shenandoahPolicy()->record_cycle_end();

  // TODO: Call this properly with Shenandoah*CycleMark
  heap->set_used_at_last_gc();
}

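// Returns true if the current cycle was cancelled. Cancellation is expected
// only when a Full GC is pending or during graceful shutdown.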
bool ShenandoahConcurrentThread::check_cancellation() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_concgc()) {
    assert(is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
    return true;
  }
  return false;
}

void ShenandoahConcurrentThread::stop_service() {
  // Nothing to do here.
}

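// Full GC cycle: records the cause for heuristics, executes a stop-the-world
// Full GC in the VM thread, then wakes up Java threads waiting in do_full_gc().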
void ShenandoahConcurrentThread::service_fullgc_cycle() {
  GCIdMark gc_id_mark;
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    if (_full_gc_cause == GCCause::_allocation_failure) {
      heap->shenandoahPolicy()->record_allocation_failure_gc();
    } else {
      heap->shenandoahPolicy()->record_user_requested_gc();
    }

    TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
    TraceMemoryManagerStats tmms(true, _full_gc_cause);
    VM_ShenandoahFullGC full_gc(_full_gc_cause);
    VMThread::execute(&full_gc);
  }

  reset_full_gc();
}

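// Called by Java threads to request a Full GC. Either schedules the GC and
// cancels the concurrent cycle, or upgrades the cause of an already-pending
// request; in both cases the caller blocks until the Full GC completes.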
void ShenandoahConcurrentThread::do_full_gc(GCCause::Cause cause) {
  assert(Thread::current()->is_Java_thread(), "expect Java thread here");

  if (try_set_full_gc()) {
    _full_gc_cause = cause;

    // Now that full GC is scheduled, we can abort everything else
    ShenandoahHeap::heap()->cancel_concgc(cause);
  } else {
    GCCause::Cause last_cause = _full_gc_cause;
    if (last_cause != cause) {
      switch (cause) {
        // These GC causes take precedence:
        case GCCause::_allocation_failure:
          log_info(gc)("Full GC was already pending with cause: %s; new cause is %s, overwriting",
                       GCCause::to_string(last_cause),
                       GCCause::to_string(cause));
          _full_gc_cause = cause;
          break;
        // Other GC causes can be ignored
        default:
          log_info(gc)("Full GC is already pending with cause: %s; new cause was %s, ignoring",
                       GCCause::to_string(last_cause),
                       GCCause::to_string(cause));
          break;
      }
    }
  }

  MonitorLockerEx ml(&_full_gc_lock);
  while (is_full_gc()) {
    ml.wait();
  }
  assert(!is_full_gc(), "expect full GC to have completed");
}

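// Clears the pending Full GC flag and wakes up all threads blocked in do_full_gc().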
void ShenandoahConcurrentThread::reset_full_gc() {
  OrderAccess::release_store_fence(&_do_full_gc, 0);
  MonitorLockerEx ml(&_full_gc_lock);
  ml.notify_all();
}

bool ShenandoahConcurrentThread::try_set_full_gc() {
  jbyte old = Atomic::cmpxchg((jbyte)1, &_do_full_gc, (jbyte)0);
  return old == 0; // success
}

bool ShenandoahConcurrentThread::is_full_gc() {
  return OrderAccess::load_acquire(&_do_full_gc) == 1;
}

bool ShenandoahConcurrentThread::is_conc_gc_requested() {
  return OrderAccess::load_acquire(&_do_concurrent_gc) == 1;
}

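// Requests an explicit concurrent GC cycle and blocks until the service loop
// acknowledges it via reset_conc_gc_requested().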
void ShenandoahConcurrentThread::do_conc_gc() {
  OrderAccess::release_store_fence(&_do_concurrent_gc, 1);
  MonitorLockerEx ml(&_conc_gc_lock);
  ml.wait();
}

void ShenandoahConcurrentThread::reset_conc_gc_requested() {
  OrderAccess::release_store_fence(&_do_concurrent_gc, 0);
  MonitorLockerEx ml(&_conc_gc_lock);
  ml.notify_all();
}

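// Counters are updated lazily: other threads raise a flag with
// trigger_counters_update(), and the actual update happens here, on the
// service thread or the periodic task.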
void ShenandoahConcurrentThread::do_counters_update() {
  if (OrderAccess::load_acquire(&_do_counters_update) == 1) {
    OrderAccess::release_store(&_do_counters_update, 0);
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahConcurrentThread::trigger_counters_update() {
  if (OrderAccess::load_acquire(&_do_counters_update) == 0) {
    OrderAccess::release_store(&_do_counters_update, 1);
  }
}

void ShenandoahConcurrentThread::print() const {
  print_on(tty);
}

void ShenandoahConcurrentThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahConcurrentThread::start() {
  create_and_start();
}

void ShenandoahConcurrentThread::prepare_for_graceful_shutdown() {
  OrderAccess::release_store_fence(&_graceful_shutdown, 1);
}

bool ShenandoahConcurrentThread::in_graceful_shutdown() {
  return OrderAccess::load_acquire(&_graceful_shutdown) == 1;
}