1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/concurrentG1Refine.hpp"
  27 #include "gc/g1/concurrentG1RefineThread.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/g1/g1HotCardCache.hpp"
  30 #include "gc/g1/g1Predictions.hpp"
  31 #include "runtime/java.hpp"
  32 
// Construct the concurrent refinement subsystem and ergonomically derive
// the initial green/yellow/red zone sizes from the corresponding
// G1ConcRefinement* flags. Note the ordering: each zone's default is
// computed from the zone set just before it, and yellow/red are clamped
// so that green <= yellow <= red always holds.
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, const G1Predictions* predictor) :
  _threads(NULL),          // worker array allocated later in create()
  _sample_thread(NULL),    // sampling thread created later in create()
  _predictor_sigma(predictor->sigma()),
  _hot_card_cache(g1h)
{
  // Ergonomically select initial concurrent refinement parameters
  // Green zone defaults to the number of parallel GC threads.
  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, ParallelGCThreads);
  }
  set_green_zone(G1ConcRefinementGreenZone);

  // Yellow zone defaults to 3x green; never allowed below green.
  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
    FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
  }
  set_yellow_zone(MAX2(G1ConcRefinementYellowZone, green_zone()));

  // Red zone defaults to 2x yellow; never allowed below yellow.
  if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
    FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
  }
  set_red_zone(MAX2(G1ConcRefinementRedZone, yellow_zone()));

}
  56 
// Factory: builds the ConcurrentG1Refine instance, its worker threads and
// the young remembered-set sampling thread. On any failure, sets *ecode to
// JNI_ENOMEM, reports shutdown-during-initialization and returns NULL;
// on success sets *ecode to JNI_OK and returns the new instance.
ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure, jint* ecode) {
  G1CollectorPolicy* policy = g1h->g1_policy();
  ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(g1h, &policy->predictor());
  if (cg1r == NULL) {
    *ecode = JNI_ENOMEM;
    vm_shutdown_during_initialization("Could not create ConcurrentG1Refine");
    return NULL;
  }
  // Worker count comes from the G1ConcRefinementThreads flag.
  cg1r->_n_worker_threads = thread_num();

  // Derive the per-thread activation threshold step from the initial zones.
  cg1r->reset_threshold_step();

  cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_worker_threads, mtGC);
  if (cg1r->_threads == NULL) {
    *ecode = JNI_ENOMEM;
    vm_shutdown_during_initialization("Could not allocate an array for ConcurrentG1RefineThread");
    return NULL;
  }

  uint worker_id_offset = DirtyCardQueueSet::num_par_ids();

  // Create the workers from highest id down to 0 so that each thread can
  // be handed a pointer to its successor ("next") at construction time.
  // Note: i counts down from _n_worker_threads - 1 and the loop exits when
  // the unsigned counter wraps to UINT_MAX; a zero worker count is handled
  // correctly because the initial value is then already UINT_MAX.
  ConcurrentG1RefineThread *next = NULL;
  for (uint i = cg1r->_n_worker_threads - 1; i != UINT_MAX; i--) {
    ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(cg1r, next, refine_closure, worker_id_offset, i);
    assert(t != NULL, "Conc refine should have been created");
    // An allocated thread object without an OS thread means thread
    // creation failed (e.g. out of native resources).
    if (t->osthread() == NULL) {
      *ecode = JNI_ENOMEM;
      vm_shutdown_during_initialization("Could not create ConcurrentG1RefineThread");
      return NULL;
    }

    assert(t->cg1r() == cg1r, "Conc refine thread should refer to this");
    cg1r->_threads[i] = t;
    next = t;
  }

  // The sampling thread periodically samples the young gen remembered set.
  cg1r->_sample_thread = new G1YoungRemSetSamplingThread();
  if (cg1r->_sample_thread->osthread() == NULL) {
    *ecode = JNI_ENOMEM;
    vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
    return NULL;
  }

  *ecode = JNI_OK;
  return cg1r;
}
 103 
 104 void ConcurrentG1Refine::reset_threshold_step() {
 105   if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
 106     _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
 107   } else {
 108     _thread_threshold_step = G1ConcRefinementThresholdStep;
 109   }
 110 }
 111 
// Late initialization: hand the hot card cache the backing storage for
// its card counts table (done after heap region mappers exist).
void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
  _hot_card_cache.initialize(card_counts_storage);
}
 115 
 116 void ConcurrentG1Refine::stop() {
 117   for (uint i = 0; i < _n_worker_threads; i++) {
 118     _threads[i]->stop();
 119   }
 120   _sample_thread->stop();
 121 }
 122 
 123 void ConcurrentG1Refine::reinitialize_threads() {
 124   reset_threshold_step();
 125   for (uint i = 0; i < _n_worker_threads; i++) {
 126     _threads[i]->initialize();
 127   }
 128 }
 129 
 130 ConcurrentG1Refine::~ConcurrentG1Refine() {
 131   for (uint i = 0; i < _n_worker_threads; i++) {
 132     delete _threads[i];
 133   }
 134   FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
 135 
 136   delete _sample_thread;
 137 }
 138 
// Apply the closure to every thread owned by concurrent refinement:
// all worker threads plus the sampling thread.
void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
  worker_threads_do(tc);
  tc->do_thread(_sample_thread);
}
 143 
 144 void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
 145   for (uint i = 0; i < worker_thread_num(); i++) {
 146     tc->do_thread(_threads[i]);
 147   }
 148 }
 149 
// Number of refinement worker threads, as configured by the
// G1ConcRefinementThreads flag.
uint ConcurrentG1Refine::thread_num() {
  return G1ConcRefinementThreads;
}
 153 
 154 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
 155   for (uint i = 0; i < _n_worker_threads; ++i) {
 156     _threads[i]->print_on(st);
 157     st->cr();
 158   }
 159   _sample_thread->print_on(st);
 160   st->cr();
 161 }
 162 
// Feedback controller called after a GC pause: adapts the refinement
// zones to keep the time spent updating remembered sets near goal_ms,
// then propagates the new zones to the worker threads and the dirty card
// queue set.
void ConcurrentG1Refine::adjust(double update_rs_time,
                                double update_rs_processed_buffers,
                                double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();

  if (G1UseAdaptiveConcRefinement) {
    // Zone multipliers (yellow = 3x green, red = 6x green) and the
    // shrink/grow factors applied to the green zone per adjustment.
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    size_t g = green_zone();
    if (update_rs_time > goal_ms) {
      // Over budget: shrink the green zone by 10%.
      g = (size_t)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
    } else {
      // Under budget AND more buffers were processed than the current
      // green zone: grow by 10%, but always by at least one buffer.
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (size_t)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    set_green_zone(g);
    set_yellow_zone(g * k_gy);
    set_red_zone(g * k_gr);
    reinitialize_threads();

    // Processing threshold: green plus a prediction-sigma-scaled delta
    // (at least 1), capped at the yellow zone.
    size_t processing_threshold_delta = MAX2<size_t>(green_zone() * _predictor_sigma, 1);
    size_t processing_threshold = MIN2(green_zone() + processing_threshold_delta,
                                    yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold((int)processing_threshold);
    dcqs.set_max_completed_queue((int)red_zone());
  }

  // If the completed buffer queue has already reached the yellow zone,
  // pad the queue so notifications account for the current backlog;
  // otherwise clear any previous padding.
  size_t curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}