1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "gc/shared/gcTraceTime.inline.hpp"
  25 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
  26 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  27 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  28 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  29 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  30 #include "memory/iterator.hpp"
  31 #include "memory/universe.hpp"
  32 #include "runtime/vmThread.hpp"
  33 
// Construct the Shenandoah concurrent GC control thread. Note that the
// native thread is created and started right here in the constructor via
// create_and_start(); the service loop (run_service) begins immediately.
ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  // Monitor used to block Java threads in do_full_gc() until the requested
  // Full GC completes (notified from reset_full_gc()).
  _full_gc_lock(Mutex::leaf, "ShenandoahFullGC_lock", true, Monitor::_safepoint_check_always),
  _do_full_gc(false),
  _graceful_shutdown(0)
{
  create_and_start();
}
  42 
// Empty destructor, defined explicitly so the base class destructor chain
// (ConcurrentGCThread and up) is invoked.
ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
  // This is here so that super is called.
}
  46 
// Main service loop of the concurrent GC thread. Each iteration performs at
// most one action, in priority order: exit on graceful shutdown, serve a
// pending Full GC, start a normal concurrent cycle when the policy's
// heuristics request one, or park for 10 ms before polling again.
// Monitoring counters are refreshed after every iteration.
void ShenandoahConcurrentThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  while (!should_terminate()) {
    if (in_graceful_shutdown()) {
      break;
    } else if (is_full_gc()) {
      service_fullgc_cycle();
    } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity())) {
      service_normal_cycle();
    } else {
      // Nothing to do right now; nap briefly before re-checking.
      Thread::current()->_ParkEvent->park(10);
    }
    heap->monitoring_support()->update_counters();

    // Make sure the _do_full_gc flag changes are seen.
    OrderAccess::storeload();
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    Thread::current()->_ParkEvent->park(10);
  }
}
  71 
// Run one complete normal GC cycle: STW init-mark, concurrent marking,
// STW final-mark (which also starts evacuation), concurrent evacuation,
// and next-bitmap reset. Bails out early — after closing GC timing via
// check_cancellation() — whenever the cycle has been cancelled, e.g. by a
// pending Full GC request.
void ShenandoahConcurrentThread::service_normal_cycle() {
  if (check_cancellation()) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCTimer* gc_timer = heap->gc_timer();

  gc_timer->register_gc_start();

  heap->shenandoahPolicy()->increase_cycle_counter();

  // Cycle-wide stats scopes; nested scopes below shadow tcs for the STW parts.
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);

  // Start initial mark under STW:
  {
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahInitMark initMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_mark_gross);
    VMThread::execute(&initMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_mark_gross);
  }

  if (check_cancellation()) return;

  // Continue concurrent mark:
  {
    GCTraceTime(Info, gc) time("Concurrent marking", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
  }

  // Possibly hand over remaining marking work to final-mark phase.
  // If marking was cancelled by an allocation failure and the policy permits,
  // clear the cancellation and let the STW final-mark finish the remaining
  // work ("degenerated" marking); otherwise abandon the cycle entirely.
  bool clear_full_gc = false;
  if (heap->cancelled_concgc()) {
    heap->shenandoahPolicy()->record_cm_cancelled();
    if (_full_gc_cause == GCCause::_allocation_failure &&
        heap->shenandoahPolicy()->handover_cancelled_marking()) {
      heap->set_cancelled_concgc(false);
      clear_full_gc = true;
      heap->shenandoahPolicy()->record_cm_degenerated();
    } else {
      // Cycle abandoned; close out GC timing before leaving.
      heap->gc_timer()->register_gc_end();
      return;
    }
  } else {
    heap->shenandoahPolicy()->record_cm_success();
  }

  // Proceed to complete marking under STW, and start evacuation:
  {
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahStartEvacuation finishMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_mark_gross);
    VMThread::execute(&finishMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_mark_gross);
  }

  if (check_cancellation()) return;

  // If we handed off remaining marking work above, we need to kick off waiting Java threads
  if (clear_full_gc) {
    reset_full_gc();
  }

  // Continue concurrent evacuation:
  {
    // NOTE(review): trailing space in the "Concurrent evacuation " phase
    // label below — intentional? Verify against expected GC log output.
    GCTraceTime(Info, gc) time("Concurrent evacuation ", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    heap->do_evacuation();
  }

  // Prepare for the next normal cycle:
  // Drop the evacuation-in-progress flag under the Threads_lock so Java
  // threads observe the transition consistently.
  if (heap->is_evacuation_in_progress()) {
    MutexLocker mu(Threads_lock);
    heap->set_evacuation_in_progress(false);
  }

  if (check_cancellation()) return;

  heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps);
  heap->reset_next_mark_bitmap(heap->conc_workers());
  heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps);

  gc_timer->register_gc_end();
}
 158 
 159 bool ShenandoahConcurrentThread::check_cancellation() {
 160   ShenandoahHeap* heap = ShenandoahHeap::heap();
 161   if (heap->cancelled_concgc()) {
 162     assert (is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
 163     heap->gc_timer()->register_gc_end();
 164     return true;
 165   }
 166   return false;
 167 }
 168 
 169 
// ConcurrentGCThread termination hook. The service loop already polls
// should_terminate() and in_graceful_shutdown() itself (see run_service),
// so no extra action is needed here.
void ShenandoahConcurrentThread::stop_service() {
  // Nothing to do here.
}
 173 
 174 void ShenandoahConcurrentThread::service_fullgc_cycle() {
 175   ShenandoahHeap* heap = ShenandoahHeap::heap();
 176 
 177   {
 178     if (_full_gc_cause == GCCause::_allocation_failure) {
 179       heap->shenandoahPolicy()->record_allocation_failure_gc();
 180     } else {
 181       heap->shenandoahPolicy()->record_user_requested_gc();
 182     }
 183 
 184     TraceCollectorStats tcs(heap->monitoring_support()->full_collection_counters());
 185     TraceMemoryManagerStats tmms(true, _full_gc_cause);
 186     VM_ShenandoahFullGC full_gc(_full_gc_cause);
 187     VMThread::execute(&full_gc);
 188   }
 189 
 190   reset_full_gc();
 191 
 192 }
 193 
// Request a Full GC for the given cause from a Java thread, then block until
// the GC thread has completed it (signalled by reset_full_gc()).
void ShenandoahConcurrentThread::do_full_gc(GCCause::Cause cause) {
  assert(Thread::current()->is_Java_thread(), "expect Java thread here");

  if (try_set_full_gc()) {
    // We won the race to schedule the Full GC; publish its cause.
    // NOTE(review): _full_gc_cause is written after the flag flips and is
    // read without synchronization by the GC thread and by losers of the
    // race below — presumably benign in practice, but verify.
    _full_gc_cause = cause;

    // Now that full GC is scheduled, we can abort everything else
    ShenandoahHeap::heap()->cancel_concgc(cause);
  } else {
    // A Full GC is already pending; just log if this request's cause differs.
    if (_full_gc_cause != cause) {
      log_info(gc)("Full GC is already pending with cause: %s; new cause is %s",
                   GCCause::to_string(_full_gc_cause),
                   GCCause::to_string(cause));
    }
  }

  // Block until the GC thread clears the flag and notifies via reset_full_gc().
  MonitorLockerEx ml(&_full_gc_lock);
  while (is_full_gc()) {
    ml.wait();
  }
  assert(!is_full_gc(), "expect full GC to have completed");
}
 216 
// Clear the pending-Full-GC flag — with a full fence so the store is visible
// before the notification — and wake all threads blocked in do_full_gc().
void ShenandoahConcurrentThread::reset_full_gc() {
  OrderAccess::release_store_fence(&_do_full_gc, 0);
  MonitorLockerEx ml(&_full_gc_lock);
  ml.notify_all();
}
 222 
 223 bool ShenandoahConcurrentThread::try_set_full_gc() {
 224   jbyte old = Atomic::cmpxchg(1, &_do_full_gc, 0);
 225   return old == 0; // success
 226 }
 227 
 228 bool ShenandoahConcurrentThread::is_full_gc() {
 229   return OrderAccess::load_acquire(&_do_full_gc) == 1;
 230 }
 231 
// Print a description of this thread to the default output stream.
void ShenandoahConcurrentThread::print() const {
  print_on(tty);
}
 235 
// Print this thread's label followed by the generic Thread description and a
// newline to the given stream.
void ShenandoahConcurrentThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}
 241 
// This thread paces itself inside run_service() and never sleeps between
// cycles; reaching this method is a programming error.
void ShenandoahConcurrentThread::sleepBeforeNextCycle() {
  assert(false, "Wake up in the GC thread that never sleeps :-)");
}
 245 
void ShenandoahConcurrentThread::start() {
  // NOTE(review): the constructor already calls create_and_start(), so
  // calling start() on a constructed instance would create a second native
  // thread. Verify whether any caller still uses this entry point; the
  // redundancy looks like a candidate for removal.
  create_and_start();
}
 249 
// Ask the GC thread to wind down: raises the shutdown flag with a full fence
// so the service loop observes it on its next iteration (see run_service).
void ShenandoahConcurrentThread::prepare_for_graceful_shutdown() {
  OrderAccess::release_store_fence(&_graceful_shutdown, 1);
}
 253 
 254 bool ShenandoahConcurrentThread::in_graceful_shutdown() {
 255   return OrderAccess::load_acquire(&_graceful_shutdown) == 1;
 256 }