/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/vmThread.hpp"

ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _full_gc_lock(Mutex::leaf, "ShenandoahFullGC_lock", true, Monitor::_safepoint_check_always),
  _do_full_gc(false),
  _graceful_shutdown(0)
{
  create_and_start();
}

ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
  // This is here so that super is called.
}

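// Main service loop of the concurrent GC thread. On each iteration it either
// breaks out for a graceful shutdown, runs a pending Full GC, runs a partial
// cycle or a normal concurrent cycle when the heuristics ask for one, or
// parks briefly and re-checks. Monitoring counters are refreshed after every
// iteration.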
void ShenandoahConcurrentThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  while (!should_terminate()) {
    if (in_graceful_shutdown()) {
      break;
    } else if (is_full_gc()) {
      service_fullgc_cycle();
    } else if (heap->shenandoahPolicy()->should_start_partial_gc()) {
      service_partial_cycle();
    } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity())) {
      service_normal_cycle();
      if (heap->is_evacuation_in_progress()) {
        heap->set_evacuation_in_progress_concurrently(false);
      }
    } else {
      Thread::current()->_ParkEvent->park(10);
    }
    heap->monitoring_support()->update_counters();

    // Make sure the _do_full_gc flag changes are seen.
    OrderAccess::storeload();
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    Thread::current()->_ParkEvent->park(10);
  }
}

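// Partial GC cycle: delegated entirely to a VM operation executed by the
// VM thread.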
void ShenandoahConcurrentThread::service_partial_cycle() {
  GCIdMark gc_id_mark;
  VM_ShenandoahPartialGC partial_gc;
  VMThread::execute(&partial_gc);
}

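// Normal concurrent cycle: initial mark (STW), concurrent marking, final mark
// and evacuation start (STW), concurrent evacuation, and a concurrent reset of
// the next marking bitmap. The cycle bails out early whenever a cancellation
// is observed between phases.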
void ShenandoahConcurrentThread::service_normal_cycle() {
  if (check_cancellation()) return;

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCTimer* gc_timer = heap->gc_timer();

  gc_timer->register_gc_start();

  heap->shenandoahPolicy()->increase_cycle_counter();

  GCIdMark gc_id_mark;
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified);

  // Start initial mark under STW:
  {
    // Workers are set up by VM_ShenandoahInitMark
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahInitMark initMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_mark_gross);
    VMThread::execute(&initMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_mark_gross);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
  }

  if (check_cancellation()) return;

  // Continue concurrent mark:
  {
    // Set up workers for the concurrent marking phase
    WorkGang* workers = heap->workers();
    uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_marking(workers->active_workers(),
                                                                              (uint) Threads::number_of_non_daemon_threads());
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime(Info, gc) time("Concurrent marking", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    ShenandoahHeap::heap()->concurrentMark()->mark_from_roots();
  }

  // Possibly hand over remaining marking work to final-mark phase.
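  // When marking was cancelled by an allocation failure and the heuristics
  // allow a handover, the cancellation is cleared and the leftover marking
  // work is finished inside the final-mark pause ("degenerated" marking);
  // the pending Full GC request is dropped once that pause is over.
  // Otherwise the cycle ends here.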
  bool clear_full_gc = false;
  if (heap->cancelled_concgc()) {
    heap->shenandoahPolicy()->record_cm_cancelled();
    if (_full_gc_cause == GCCause::_allocation_failure &&
        heap->shenandoahPolicy()->handover_cancelled_marking()) {
      heap->clear_cancelled_concgc();
      clear_full_gc = true;
      heap->shenandoahPolicy()->record_cm_degenerated();
    } else {
      heap->gc_timer()->register_gc_end();
      return;
    }
  } else {
    heap->shenandoahPolicy()->record_cm_success();
  }

  // Proceed to complete marking under STW, and start evacuation:
  {
    // Workers are set up by VM_ShenandoahStartEvacuation
    TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
    VM_ShenandoahStartEvacuation finishMark;
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross);
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_mark_gross);
    VMThread::execute(&finishMark);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_mark_gross);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross);
  }

  if (check_cancellation()) return;

  // If we handed over remaining marking work above, we need to wake up the Java threads waiting for the Full GC request to clear
  if (clear_full_gc) {
    reset_full_gc();
  }

  // Continue concurrent evacuation:
  {
    // Set up workers for the concurrent evacuation phase
    WorkGang* workers = heap->workers();
    uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(workers->active_workers(),
                                                                                 (uint) Threads::number_of_non_daemon_threads());
    ShenandoahWorkerScope scope(workers, n_workers);

    GCTraceTime(Info, gc) time("Concurrent evacuation", gc_timer, GCCause::_no_gc, true);
    TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
    heap->do_evacuation();
  }

  // Prepare for the next normal cycle:
  if (check_cancellation()) return;

  {
    GCTraceTime(Info, gc) time("Concurrent reset bitmaps", gc_timer, GCCause::_no_gc);
    heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps);
    WorkGang* workers = heap->workers();
    ShenandoahPushWorkerScope scope(workers, heap->max_workers());
    heap->reset_next_mark_bitmap(workers);
    heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps);
  }

  gc_timer->register_gc_end();
}

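// Returns true if the concurrent GC has been cancelled, in which case the
// current cycle's GC timer is closed and the caller should bail out.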
bool ShenandoahConcurrentThread::check_cancellation() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_concgc()) {
    assert (is_full_gc() || in_graceful_shutdown(), "Cancel GC either for Full GC, or gracefully exiting");
    heap->gc_timer()->register_gc_end();
    return true;
  }
  return false;
}

void ShenandoahConcurrentThread::stop_service() {
  // Nothing to do here.
}

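// Full GC cycle: records the cause in the policy, then runs the collection as
// a VM operation. When it completes, the pending-Full-GC flag is cleared and
// any waiting Java threads are notified.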
void ShenandoahConcurrentThread::service_fullgc_cycle() {
  GCIdMark gc_id_mark;
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    if (_full_gc_cause == GCCause::_allocation_failure) {
      heap->shenandoahPolicy()->record_allocation_failure_gc();
    } else {
      heap->shenandoahPolicy()->record_user_requested_gc();
    }

    TraceCollectorStats tcs(heap->monitoring_support()->full_collection_counters());
    TraceMemoryManagerStats tmms(true, _full_gc_cause);
    VM_ShenandoahFullGC full_gc(_full_gc_cause);
    VMThread::execute(&full_gc);
  }

  reset_full_gc();
}

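// Called by a Java thread to request a Full GC. The first caller wins the CAS
// on the pending flag, records the cause and cancels the concurrent GC; later
// callers with a different cause only log. All callers then block on the
// full GC lock until the request has been serviced.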
void ShenandoahConcurrentThread::do_full_gc(GCCause::Cause cause) {
  assert(Thread::current()->is_Java_thread(), "expect Java thread here");

  if (try_set_full_gc()) {
    _full_gc_cause = cause;

    // Now that full GC is scheduled, we can abort everything else
    ShenandoahHeap::heap()->cancel_concgc(cause);
  } else {
    if (_full_gc_cause != cause) {
      log_info(gc)("Full GC is already pending with cause: %s; new cause is %s",
                   GCCause::to_string(_full_gc_cause),
                   GCCause::to_string(cause));
    }
  }

  MonitorLockerEx ml(&_full_gc_lock);
  while (is_full_gc()) {
    ml.wait();
  }
  assert(!is_full_gc(), "expect full GC to have completed");
}

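// Clears the pending-Full-GC flag and wakes up all Java threads blocked in
// do_full_gc().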
void ShenandoahConcurrentThread::reset_full_gc() {
  OrderAccess::release_store_fence(&_do_full_gc, 0);
  MonitorLockerEx ml(&_full_gc_lock);
  ml.notify_all();
}

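// Atomically flips the pending-Full-GC flag from 0 to 1 with a compare-and-
// swap; returns true if this caller made the transition, false if a Full GC
// was already pending.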
bool ShenandoahConcurrentThread::try_set_full_gc() {
  jbyte old = Atomic::cmpxchg(1, &_do_full_gc, 0);
  return old == 0; // success
}

bool ShenandoahConcurrentThread::is_full_gc() {
  return OrderAccess::load_acquire(&_do_full_gc) == 1;
}

void ShenandoahConcurrentThread::print() const {
  print_on(tty);
}

void ShenandoahConcurrentThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahConcurrentThread::sleepBeforeNextCycle() {
  assert(false, "Wake up in the GC thread that never sleeps :-)");
}

void ShenandoahConcurrentThread::start() {
  create_and_start();
}

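// Graceful shutdown protocol: prepare_for_graceful_shutdown() raises a flag
// that the service loop checks at the top of every iteration; once observed,
// the loop exits and the thread idles until the actual stop() terminates it.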
void ShenandoahConcurrentThread::prepare_for_graceful_shutdown() {
  OrderAccess::release_store_fence(&_graceful_shutdown, 1);
}

bool ShenandoahConcurrentThread::in_graceful_shutdown() {
  return OrderAccess::load_acquire(&_graceful_shutdown) == 1;
}