1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "gc/shared/gcTraceTime.inline.hpp"
  25 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  26 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
  27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  29 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  30 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  31 #include "memory/iterator.hpp"
  32 #include "memory/universe.hpp"
  33 #include "runtime/vmThread.hpp"
  34 
// Construct and immediately start the Shenandoah control thread.
// _full_gc_lock is a leaf-ranked monitor used to block Java threads that
// requested a Full GC until the cycle completes (see do_full_gc()).
ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _full_gc_lock(Mutex::leaf, "ShenandoahFullGC_lock", true, Monitor::_safepoint_check_always),
  _do_full_gc(false),
  _graceful_shutdown(0)
{
  create_and_start();
}
  43 
// Empty out-of-line destructor, kept so the base class destructor chain runs
// from this translation unit.
ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
  // This is here so that super is called.
}
  47 
// Main control loop. Each iteration picks at most one action, in priority
// order: graceful shutdown > pending Full GC > partial GC > normal concurrent
// cycle; otherwise idles briefly. Runs until stop() flips should_terminate().
void ShenandoahConcurrentThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  while (!should_terminate()) {
    if (in_graceful_shutdown()) {
      break;
    } else if (is_full_gc()) {
      service_fullgc_cycle();
    } else if (heap->shenandoahPolicy()->should_start_partial_gc()) {
      service_partial_cycle();
    } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity())) {
      service_normal_cycle();
      // A cancelled normal cycle may leave evacuation flagged on; turn it off
      // before the next iteration decides what to do.
      if (heap->is_evacuation_in_progress()) {
        heap->set_evacuation_in_progress_concurrently(false);
      }
    } else {
      // Nothing to do: short timed park (10 ms) before re-evaluating.
      Thread::current()->_ParkEvent->park(10);
    }
    // Refresh jstat/perf counters once per iteration, regardless of action.
    heap->monitoring_support()->update_counters();

    // Make sure the _do_full_gc flag changes are seen.
    OrderAccess::storeload();
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    Thread::current()->_ParkEvent->park(10);
  }
}
  77 
  78 void ShenandoahConcurrentThread::service_partial_cycle() {
  79   GCIdMark gc_id_mark;
  80   VM_ShenandoahPartialGC partial_gc;
  81   VMThread::execute(&partial_gc);
  82 }
  83 
// Run one complete normal concurrent GC cycle:
//   init mark (STW) -> concurrent mark -> final mark + start evac (STW)
//   -> concurrent evacuation -> concurrent bitmap reset.
// Bails out between phases if the cycle was cancelled (e.g. by a pending
// Full GC or shutdown); check_cancellation() closes the GC timer on bail-out.
void ShenandoahConcurrentThread::service_normal_cycle() {
  if (check_cancellation()) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCTimer* gc_timer = heap->gc_timer();

  gc_timer->register_gc_start();

  heap->shenandoahPolicy()->increase_cycle_counter();

  // GC id plus cycle-wide counters; the inner STW scopes temporarily layer
  // the stw_collection_counters on top of these concurrent counters.
  GCIdMark gc_id_mark;
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);

  // Start initial mark under STW:
  {
    // Workers are setup by VM_ShenandoahInitMark
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahInitMark initMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_mark_gross);
    VMThread::execute(&initMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_mark_gross);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
  }

  if (check_cancellation()) return;

  // Continue concurrent mark:
  {
    // Setup workers for concurrent marking phase
    WorkGang* workers = heap->workers();
    uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_marking(workers->active_workers(),
      Threads::number_of_non_daemon_threads());
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime(Info, gc) time("Concurrent marking", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
  }

  // Possibly hand over remaining marking work to final-mark phase.
  // If marking was cancelled by an allocation failure and policy allows it,
  // degenerate: un-cancel and let the final-mark pause finish the marking,
  // instead of abandoning the cycle to a Full GC.
  bool clear_full_gc = false;
  if (heap->cancelled_concgc()) {
    heap->shenandoahPolicy()->record_cm_cancelled();
    if (_full_gc_cause == GCCause::_allocation_failure &&
        heap->shenandoahPolicy()->handover_cancelled_marking()) {
      heap->set_cancelled_concgc(false);
      clear_full_gc = true;
      heap->shenandoahPolicy()->record_cm_degenerated();
    } else {
      // Cycle abandoned; close the timer before leaving.
      heap->gc_timer()->register_gc_end();
      return;
    }
  } else {
    heap->shenandoahPolicy()->record_cm_success();
  }

  // Proceed to complete marking under STW, and start evacuation:
  {
    // Workers are setup by VM_ShenandoahStartEvacuation
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahStartEvacuation finishMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_mark_gross);
    VMThread::execute(&finishMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_mark_gross);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
  }

  if (check_cancellation()) return;

  // If we handed off remaining marking work above, we need to kick off waiting Java threads
  if (clear_full_gc) {
    reset_full_gc();
  }

  // Continue concurrent evacuation:
  {
    // Setup workers for concurrent evacuation phase
    WorkGang* workers = heap->workers();
    uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(workers->active_workers(),
      Threads::number_of_non_daemon_threads());
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime(Info, gc) time("Concurrent evacuation ", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    heap->do_evacuation();
  }

  // Prepare for the next normal cycle:
  if (check_cancellation()) return;

  {
    GCTraceTime(Info, gc) time("Concurrent reset bitmaps", gc_timer, GCCause::_no_gc);
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps);
    WorkGang* workers = heap->workers();
    // Push worker count to the maximum for the reset; restored at scope exit.
    ShenandoahPushWorkerScope scope(workers, heap->max_workers());
    heap->reset_next_mark_bitmap(workers);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps);
  }

  gc_timer->register_gc_end();
}
 189 
 190 bool ShenandoahConcurrentThread::check_cancellation() {
 191   ShenandoahHeap* heap = ShenandoahHeap::heap();
 192   if (heap->cancelled_concgc()) {
 193     assert (is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
 194     heap->gc_timer()->register_gc_end();
 195     return true;
 196   }
 197   return false;
 198 }
 199 
 200 
// ConcurrentGCThread hook invoked on stop(). No extra teardown is needed:
// run_service() polls should_terminate() and exits on its own.
void ShenandoahConcurrentThread::stop_service() {
  // Nothing to do here.
}
 204 
// Execute a pending Full GC (cause recorded by do_full_gc()), then clear the
// flag and wake the Java threads blocked waiting for its completion.
void ShenandoahConcurrentThread::service_fullgc_cycle() {
  GCIdMark gc_id_mark;
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Inner scope ensures the stats objects are destructed (and counters
  // finalized) before reset_full_gc() releases the waiters.
  {
    if (_full_gc_cause == GCCause::_allocation_failure) {
      heap->shenandoahPolicy()->record_allocation_failure_gc();
    } else {
      heap->shenandoahPolicy()->record_user_requested_gc();
    }

    TraceCollectorStats tcs(heap->monitoring_support()->full_collection_counters());
    TraceMemoryManagerStats tmms(true, _full_gc_cause);
    VM_ShenandoahFullGC full_gc(_full_gc_cause);
    VMThread::execute(&full_gc);
  }

  reset_full_gc();
}
 224 
// Request a Full GC on behalf of the calling Java thread and block until the
// control thread has completed it (reset_full_gc() notifies the monitor).
// Only the first requester wins the CAS and sets the cause; concurrent
// requesters with a different cause just log and wait.
void ShenandoahConcurrentThread::do_full_gc(GCCause::Cause cause) {
  assert(Thread::current()->is_Java_thread(), "expect Java thread here");

  if (try_set_full_gc()) {
    _full_gc_cause = cause;

    // Now that full GC is scheduled, we can abort everything else
    ShenandoahHeap::heap()->cancel_concgc(cause);
  } else {
    // NOTE(review): _full_gc_cause is read here without synchronization while
    // the winning thread may still be storing it — looks benign (log-only),
    // but worth confirming.
    if (_full_gc_cause != cause) {
      log_info(gc)("Full GC is already pending with cause: %s; new cause is %s",
                   GCCause::to_string(_full_gc_cause),
                   GCCause::to_string(cause));
    }
  }

  // Block until the control thread clears _do_full_gc and notifies.
  MonitorLockerEx ml(&_full_gc_lock);
  while (is_full_gc()) {
    ml.wait();
  }
  assert(!is_full_gc(), "expect full GC to have completed");
}
 247 
// Clear the Full GC flag and wake all Java threads blocked in do_full_gc().
// The release-store-fence must precede the notify so that woken waiters
// observe _do_full_gc == 0 and exit their wait loop.
void ShenandoahConcurrentThread::reset_full_gc() {
  OrderAccess::release_store_fence(&_do_full_gc, 0);
  MonitorLockerEx ml(&_full_gc_lock);
  ml.notify_all();
}
 253 
 254 bool ShenandoahConcurrentThread::try_set_full_gc() {
 255   jbyte old = Atomic::cmpxchg(1, &_do_full_gc, 0);
 256   return old == 0; // success
 257 }
 258 
 259 bool ShenandoahConcurrentThread::is_full_gc() {
 260   return OrderAccess::load_acquire(&_do_full_gc) == 1;
 261 }
 262 
// Convenience variant printing to the default tty stream.
void ShenandoahConcurrentThread::print() const {
  print_on(tty);
}
 266 
// Print a one-line description of this thread, followed by the generic
// Thread state, to the given stream.
void ShenandoahConcurrentThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}
 272 
// Not used by this thread: pacing is done with timed parks inside
// run_service(), so reaching this hook indicates a programming error.
void ShenandoahConcurrentThread::sleepBeforeNextCycle() {
  assert(false, "Wake up in the GC thread that never sleeps :-)");
}
 276 
// NOTE(review): the constructor already calls create_and_start(), so invoking
// start() on a constructed instance would start a second OS thread for the
// same object. Looks like dead code — confirm there are no callers and
// consider removing.
void ShenandoahConcurrentThread::start() {
  create_and_start();
}
 280 
// Ask the control loop to exit at its next iteration (e.g. on VM exit).
// The release-store-fence publishes the flag for in_graceful_shutdown().
void ShenandoahConcurrentThread::prepare_for_graceful_shutdown() {
  OrderAccess::release_store_fence(&_graceful_shutdown, 1);
}
 284 
 285 bool ShenandoahConcurrentThread::in_graceful_shutdown() {
 286   return OrderAccess::load_acquire(&_graceful_shutdown) == 1;
 287 }