/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/vmThread.hpp"

SurrogateLockerThread* ShenandoahConcurrentThread::_slt = NULL;

ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _full_gc_lock(Mutex::leaf, "ShenandoahFullGC_lock", true),
  _do_full_gc(false),
  _graceful_shutdown(0)
{
  create_and_start();
}

ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
  // This is here so that super is called.
}

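// Main loop of the concurrent GC thread. Waits until the surrogate locker
// thread is in place, then keeps servicing full GC requests and normal
// concurrent cycles until the thread is asked to terminate.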
void ShenandoahConcurrentThread::run() {
  initialize_in_thread();

  wait_for_universe_init();

  // Wait until we have the surrogate locker thread in place.
  {
    MutexLockerEx x(CGC_lock, true);
    while (_slt == NULL && !_should_terminate) {
      CGC_lock->wait(true, 200);
    }
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  while (! _should_terminate) {
    if (in_graceful_shutdown()) {
      break;
    } else if (is_full_gc()) {
      service_fullgc_cycle();
    } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity())) {
      service_normal_cycle();
      if (heap->is_evacuation_in_progress()) {
        heap->set_evacuation_in_progress_concurrently(false);
      }
    } else {
      Thread::current()->_ParkEvent->park(10);
    }
    heap->monitoring_support()->update_counters();

    // Make sure the _do_full_gc flag changes are seen.
    OrderAccess::storeload();
  }

  // Wait for the actual stop(), can't leave run() earlier.
  while (! _should_terminate) {
    Thread::current()->_ParkEvent->park(10);
  }
  terminate();
}

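// One normal GC cycle: initial mark pause, concurrent marking, final mark
// and evacuation start pause, concurrent evacuation, and finally a
// concurrent reset of the next mark bitmap.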
void ShenandoahConcurrentThread::service_normal_cycle() {
  if (check_cancellation()) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCTimer* gc_timer = heap->gc_timer();
  GCTracer* gc_tracer = heap->tracer();

  gc_timer->register_gc_start();
  gc_tracer->report_gc_start(GCCause::_no_cause_specified, gc_timer->gc_start());

  heap->shenandoahPolicy()->increase_cycle_counter();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);

  // Start initial mark under STW:
  {
    // Workers are set up by VM_ShenandoahInitMark
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahInitMark initMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_mark_gross);
    VMThread::execute(&initMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_mark_gross);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
  }

  if (check_cancellation()) return;

  // Continue concurrent mark:
  {
    // Set up workers for concurrent marking phase
    FlexibleWorkGang* workers = heap->workers();
    uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_marking(workers->active_workers(),
                                                                              (uint) Threads::number_of_non_daemon_threads());
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime time("Concurrent marking", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id(), true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
  }

  // Possibly hand over remaining marking work to final-mark phase.
  bool clear_full_gc = false;
  if (heap->cancelled_concgc()) {
    heap->shenandoahPolicy()->record_cm_cancelled();
    if (_full_gc_cause == GCCause::_allocation_failure &&
        heap->shenandoahPolicy()->handover_cancelled_marking()) {
      heap->clear_cancelled_concgc();
      clear_full_gc = true;
      heap->shenandoahPolicy()->record_cm_degenerated();
    } else {
      heap->gc_timer()->register_gc_end();
      return;
    }
  } else {
    heap->shenandoahPolicy()->record_cm_success();
  }

  // Proceed to complete marking under STW, and start evacuation:
  {
    // Workers are set up by VM_ShenandoahFinalMarkStartEvac
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahFinalMarkStartEvac finishMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_mark_gross);
    VMThread::execute(&finishMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_mark_gross);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
  }

  if (check_cancellation()) return;

  // If we handed off remaining marking work above, we need to kick off waiting Java threads
  if (clear_full_gc) {
    reset_full_gc();
  }

  // Continue concurrent evacuation:
  {
    // Set up workers for concurrent evacuation phase
    FlexibleWorkGang* workers = heap->workers();
    uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(workers->active_workers(),
                                                                                 (uint) Threads::number_of_non_daemon_threads());
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime time("Concurrent evacuation", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id(), true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    heap->do_evacuation();
  }

  // Prepare for the next normal cycle:
  if (check_cancellation()) return;

  {
    GCTraceTime time("Concurrent reset bitmaps", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id());
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps);
    FlexibleWorkGang* workers = heap->workers();
    ShenandoahPushWorkerScope scope(workers, heap->max_workers());
    heap->reset_next_mark_bitmap(workers);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps);
  }

  gc_timer->register_gc_end();
  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
}

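// Returns true if the concurrent GC has been cancelled (either because a
// full GC was requested, or because we are shutting down), and closes out
// the GC timer in that case.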
bool ShenandoahConcurrentThread::check_cancellation() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_concgc()) {
    assert (is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
    heap->gc_timer()->register_gc_end();
    return true;
  }
  return false;
}

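// Requests termination of this thread and blocks until it has terminated.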
void ShenandoahConcurrentThread::stop() {
  {
    MutexLockerEx ml(Terminator_lock);
    _should_terminate = true;
  }

  {
    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
    CGC_lock->notify_all();
  }

  {
    MutexLockerEx ml(Terminator_lock);
    while (!_has_terminated) {
      Terminator_lock->wait();
    }
  }
}

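// Runs a single stop-the-world full GC through the VM thread, then clears
// the pending full GC request and wakes up the requesting Java threads.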
void ShenandoahConcurrentThread::service_fullgc_cycle() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    if (_full_gc_cause == GCCause::_allocation_failure) {
      heap->shenandoahPolicy()->record_allocation_failure_gc();
    } else {
      heap->shenandoahPolicy()->record_user_requested_gc();
    }

    TraceCollectorStats tcs(heap->monitoring_support()->full_collection_counters());
    TraceMemoryManagerStats tmms(true, _full_gc_cause);
    VM_ShenandoahFullGC full_gc(_full_gc_cause);
    VMThread::execute(&full_gc);
  }

  reset_full_gc();
}

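// Called by Java threads to request a full GC. Schedules the full GC (or
// joins an already pending one) and blocks until that cycle has completed.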
void ShenandoahConcurrentThread::do_full_gc(GCCause::Cause cause) {
  assert(Thread::current()->is_Java_thread(), "expect Java thread here");

  if (try_set_full_gc()) {
    _full_gc_cause = cause;

    // Now that full GC is scheduled, we can abort everything else
    ShenandoahHeap::heap()->cancel_concgc(cause);
  } else {
    if (_full_gc_cause != cause) {
      log_info(gc)("Full GC is already pending with cause: %s; new cause is %s",
                   GCCause::to_string(_full_gc_cause),
                   GCCause::to_string(cause));
    }
  }

  MonitorLockerEx ml(&_full_gc_lock);
  while (is_full_gc()) {
    ml.wait();
  }
  assert(!is_full_gc(), "expect full GC to have completed");
}

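// Clears the pending full GC flag and wakes up threads waiting in do_full_gc().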
void ShenandoahConcurrentThread::reset_full_gc() {
  OrderAccess::release_store_fence(&_do_full_gc, 0);
  MonitorLockerEx ml(&_full_gc_lock);
  ml.notify_all();
}

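// Atomically raises the pending full GC flag; returns true if this caller
// won the race to schedule the full GC.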
bool ShenandoahConcurrentThread::try_set_full_gc() {
  jbyte old = Atomic::cmpxchg(1, &_do_full_gc, 0);
  return old == 0; // success
}

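// Returns true while a full GC request is pending.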
bool ShenandoahConcurrentThread::is_full_gc() {
  return OrderAccess::load_acquire(&_do_full_gc) == 1;
}

void ShenandoahConcurrentThread::print() const {
  print_on(tty);
}

void ShenandoahConcurrentThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

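// Not used: cycle pacing is done directly in run(), so this should never be called.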
void ShenandoahConcurrentThread::sleepBeforeNextCycle() {
  assert(false, "Wake up in the GC thread that never sleeps :-)");
}

void ShenandoahConcurrentThread::start() {
  create_and_start();
}

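// Creates the surrogate locker thread that manipulates the java.lang.ref
// pending list lock on behalf of the VM thread.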
void ShenandoahConcurrentThread::makeSurrogateLockerThread(TRAPS) {
  assert(UseShenandoahGC, "SLT thread needed only for concurrent GC");
  assert(THREAD->is_Java_thread(), "must be a Java thread");
  assert(_slt == NULL, "SLT already created");
  _slt = SurrogateLockerThread::make(THREAD);
}

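// Asks the concurrent GC thread to wind down: run() exits its service loop
// on the next iteration and waits for the final stop().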
void ShenandoahConcurrentThread::prepare_for_graceful_shutdown() {
  OrderAccess::release_store_fence(&_graceful_shutdown, 1);
}

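// Returns true once a graceful shutdown has been requested.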
bool ShenandoahConcurrentThread::in_graceful_shutdown() {
  return OrderAccess::load_acquire(&_graceful_shutdown) == 1;
}