/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/vmThread.hpp"

ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _full_gc_lock(Mutex::leaf, "ShenandoahFullGC_lock", true, Monitor::_safepoint_check_always),
  _do_full_gc(false),
  _graceful_shutdown(0)
{
  create_and_start();
}

ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
  // This is here so that super is called.
}

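// Main loop of the concurrent GC thread: runs a pending full GC if one has
// been requested, starts a concurrent cycle when the heuristics call for it,
// and otherwise parks briefly before re-checking. Exits on graceful shutdown
// or termination.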
void ShenandoahConcurrentThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  while (!should_terminate()) {
    if (in_graceful_shutdown()) {
      break;
    } else if (is_full_gc()) {
      service_fullgc_cycle();
    } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity())) {
      service_normal_cycle();
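      // The cycle may finish with evacuation still flagged as in progress;
      // clear the flag here. Threads_lock is taken because this happens
      // outside the safepoint that normally toggles the flag.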
      if (heap->is_evacuation_in_progress()) {
        MutexLocker mu(Threads_lock);
        heap->set_evacuation_in_progress(false);
      }
    } else {
      Thread::current()->_ParkEvent->park(10);
    }
    heap->monitoring_support()->update_counters();

    // Make sure the _do_full_gc flag changes are seen.
    OrderAccess::storeload();
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    Thread::current()->_ParkEvent->park(10);
  }
}

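// One concurrent GC cycle: initial mark under STW, concurrent marking,
// final mark plus start of evacuation under STW, concurrent evacuation,
// and finally resetting the next mark bitmap. Bails out early whenever
// the cycle gets cancelled.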
void ShenandoahConcurrentThread::service_normal_cycle() {
  if (check_cancellation()) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCTimer* gc_timer = heap->gc_timer();

  gc_timer->register_gc_start();

  heap->shenandoahPolicy()->increase_cycle_counter();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);

  // Start initial mark under STW:
  {
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahInitMark initMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_mark_gross);
    VMThread::execute(&initMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_mark_gross);
  }

  if (check_cancellation()) return;

  // Continue concurrent mark:
  {
    GCTraceTime(Info, gc) time("Concurrent marking", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
  }

  // Possibly hand over remaining marking work to the final-mark phase.
  bool clear_full_gc = false;
  if (heap->cancelled_concgc()) {
    heap->shenandoahPolicy()->record_cm_cancelled();
    if (_full_gc_cause == GCCause::_allocation_failure &&
        heap->shenandoahPolicy()->handover_cancelled_marking()) {
      heap->set_cancelled_concgc(false);
      clear_full_gc = true;
      heap->shenandoahPolicy()->record_cm_degenerated();
    } else {
      heap->gc_timer()->register_gc_end();
      return;
    }
  } else {
    heap->shenandoahPolicy()->record_cm_success();
  }

  // Proceed to complete marking under STW, and start evacuation:
  {
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahStartEvacuation finishMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_mark_gross);
    VMThread::execute(&finishMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_mark_gross);
  }

  if (check_cancellation()) return;

  // If we handed off remaining marking work above, we need to kick off waiting Java threads.
  if (clear_full_gc) {
    reset_full_gc();
  }

  // Continue concurrent evacuation:
  {
    GCTraceTime(Info, gc) time("Concurrent evacuation", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    heap->do_evacuation();
  }

  // Prepare for the next normal cycle:
  if (check_cancellation()) return;

  heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps);
  heap->reset_next_mark_bitmap(heap->conc_workers());
  heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps);

  gc_timer->register_gc_end();
}

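// Returns true if the concurrent cycle has been cancelled, which only happens
// for a pending full GC or a graceful shutdown; also closes out GC timing.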
bool ShenandoahConcurrentThread::check_cancellation() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_concgc()) {
    assert(is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
    heap->gc_timer()->register_gc_end();
    return true;
  }
  return false;
}

void ShenandoahConcurrentThread::stop_service() {
  // Nothing to do here.
}

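// Runs a stop-the-world full GC on the VM thread, then clears the full GC
// flag and wakes up any Java threads waiting in do_full_gc().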
void ShenandoahConcurrentThread::service_fullgc_cycle() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    if (_full_gc_cause == GCCause::_allocation_failure) {
      heap->shenandoahPolicy()->record_allocation_failure_gc();
    } else {
      heap->shenandoahPolicy()->record_user_requested_gc();
    }

    TraceCollectorStats tcs(heap->monitoring_support()->full_collection_counters());
    TraceMemoryManagerStats tmms(true, _full_gc_cause);
    VM_ShenandoahFullGC full_gc(_full_gc_cause);
    VMThread::execute(&full_gc);
  }

  reset_full_gc();
}

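// Requests a full GC from a Java thread. The first requester raises the flag
// and cancels any concurrent cycle in progress; all requesters then block
// until the scheduled full GC has completed.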
void ShenandoahConcurrentThread::do_full_gc(GCCause::Cause cause) {
  assert(Thread::current()->is_Java_thread(), "expect Java thread here");

  if (try_set_full_gc()) {
    _full_gc_cause = cause;

    // Now that full GC is scheduled, we can abort everything else
    ShenandoahHeap::heap()->cancel_concgc(cause);
  } else {
    if (_full_gc_cause != cause) {
      log_info(gc)("Full GC is already pending with cause: %s; new cause is %s",
                   GCCause::to_string(_full_gc_cause),
                   GCCause::to_string(cause));
    }
  }

  MonitorLockerEx ml(&_full_gc_lock);
  while (is_full_gc()) {
    ml.wait();
  }
  assert(!is_full_gc(), "expect full GC to have completed");
}

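// Clears the full GC flag and notifies all threads waiting on _full_gc_lock.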
void ShenandoahConcurrentThread::reset_full_gc() {
  OrderAccess::release_store_fence(&_do_full_gc, 0);
  MonitorLockerEx ml(&_full_gc_lock);
  ml.notify_all();
}

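// Atomically raises the full GC flag; returns true if this caller set it,
// i.e. no full GC was already pending.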
bool ShenandoahConcurrentThread::try_set_full_gc() {
  jbyte old = Atomic::cmpxchg(1, &_do_full_gc, 0);
  return old == 0; // success
}

bool ShenandoahConcurrentThread::is_full_gc() {
  return OrderAccess::load_acquire(&_do_full_gc) == 1;
}

void ShenandoahConcurrentThread::print() const {
  print_on(tty);
}

void ShenandoahConcurrentThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahConcurrentThread::sleepBeforeNextCycle() {
  assert(false, "Wake up in the GC thread that never sleeps :-)");
}

void ShenandoahConcurrentThread::start() {
  create_and_start();
}

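// Graceful shutdown: raising the flag makes run_service() leave its main loop
// and wait for the actual stop() request.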
void ShenandoahConcurrentThread::prepare_for_graceful_shutdown() {
  OrderAccess::release_store_fence(&_graceful_shutdown, 1);
}

bool ShenandoahConcurrentThread::in_graceful_shutdown() {
  return OrderAccess::load_acquire(&_graceful_shutdown) == 1;
}