
src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp

rev 47863 : imported patch 8190426-lazy-init-refinement-threads
rev 47864 : [mq]: 8190426-sangheon-review


  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1ConcurrentRefine.hpp"
  27 #include "gc/g1/g1ConcurrentRefineThread.hpp"
  28 #include "logging/log.hpp"
  29 #include "runtime/java.hpp"
  30 #include "runtime/thread.hpp"
  31 #include "utilities/debug.hpp"
  32 #include "utilities/globalDefinitions.hpp"
  33 #include "utilities/pair.hpp"
  34 #include <math.h>
  35 
  36 // Arbitrary but large limits, to simplify some of the zone calculations.
  37 // The general idea is to allow expressions like
  38 //   MIN2(x OP y, max_XXX_zone)
  39 // without needing to check for overflow in "x OP y", because the
  40 // ranges for x and y have been restricted.
  41 STATIC_ASSERT(sizeof(LP64_ONLY(jint) NOT_LP64(jshort)) <= (sizeof(size_t)/2));
  42 const size_t max_yellow_zone = LP64_ONLY(max_jint) NOT_LP64(max_jshort);
  43 const size_t max_green_zone = max_yellow_zone / 2;
  44 const size_t max_red_zone = INT_MAX; // For dcqs.set_max_completed_queue.
  45 STATIC_ASSERT(max_yellow_zone <= max_red_zone);
  46 
  47 // Range check assertions for green zone values.
  48 #define assert_zone_constraints_g(green)                        \
  49   do {                                                          \
  50     size_t azc_g_green = (green);                               \
  51     assert(azc_g_green <= max_green_zone,                       \
  52            "green exceeds max: " SIZE_FORMAT, azc_g_green);     \
  53   } while (0)
  54 
  55 // Range check assertions for green and yellow zone values.


  79            azc_gyr_yellow, azc_gyr_red);                                \
  80   } while (0)
  81 
  82 // Logging tag sequence for refinement control updates.
  83 #define CTRL_TAGS gc, ergo, refine
  84 
  85 // For logging zone values, ensuring consistency of level and tags.
  86 #define LOG_ZONES(...) log_debug( CTRL_TAGS )(__VA_ARGS__)
  87 
  88 // Package for pair of refinement thread activation and deactivation
  89 // thresholds.  The activation and deactivation levels are resp. the first
  90 // and second values of the pair.
  91 typedef Pair<size_t, size_t> Thresholds;
  92 inline size_t activation_level(const Thresholds& t) { return t.first; }
  93 inline size_t deactivation_level(const Thresholds& t) { return t.second; }
  94 
  95 static Thresholds calc_thresholds(size_t green_zone,
  96                                   size_t yellow_zone,
  97                                   uint worker_i) {
  98   double yellow_size = yellow_zone - green_zone;
  99   double step = yellow_size / G1ConcurrentRefine::thread_num();
 100   if (worker_i == 0) {
 101     // Potentially activate worker 0 more aggressively, to keep
 102     // available buffers near green_zone value.  When yellow_size is
 103     // large we don't want to allow a full step to accumulate before
 104     // doing any processing, as that might lead to significantly more
 105     // than green_zone buffers to be processed by update_rs.
 106     step = MIN2(step, ParallelGCThreads / 2.0);
 107   }
 108   size_t activate_offset = static_cast<size_t>(ceil(step * (worker_i + 1)));
 109   size_t deactivate_offset = static_cast<size_t>(floor(step * worker_i));
 110   return Thresholds(green_zone + activate_offset,
 111                     green_zone + deactivate_offset);
 112 }
 113 
 114 G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
 115                                        size_t yellow_zone,
 116                                        size_t red_zone,
 117                                        size_t min_yellow_zone_size) :
 118   _threads(NULL),
 119   _n_worker_threads(thread_num()),
 120   _green_zone(green_zone),
 121   _yellow_zone(yellow_zone),
 122   _red_zone(red_zone),
 123   _min_yellow_zone_size(min_yellow_zone_size)
 124 {
 125   assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
 126 }
 127 




 128 static size_t calc_min_yellow_zone_size() {
 129   size_t step = G1ConcRefinementThresholdStep;
 130   uint n_workers = G1ConcurrentRefine::thread_num();
 131   if ((max_yellow_zone / step) < n_workers) {
 132     return max_yellow_zone;
 133   } else {
 134     return step * n_workers;
 135   }
 136 }
 137 
 138 static size_t calc_init_green_zone() {
 139   size_t green = G1ConcRefinementGreenZone;
 140   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
 141     green = ParallelGCThreads;
 142   }
 143   return MIN2(green, max_green_zone);
 144 }
 145 
 146 static size_t calc_init_yellow_zone(size_t green, size_t min_size) {
 147   size_t config = G1ConcRefinementYellowZone;
 148   size_t size = 0;
 149   if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
 150     size = green * 2;


 174   size_t red_zone = calc_init_red_zone(green_zone, yellow_zone);
 175 
 176   LOG_ZONES("Initial Refinement Zones: "
 177             "green: " SIZE_FORMAT ", "
 178             "yellow: " SIZE_FORMAT ", "
 179             "red: " SIZE_FORMAT ", "
 180             "min yellow size: " SIZE_FORMAT,
 181             green_zone, yellow_zone, red_zone, min_yellow_zone_size);
 182 
 183   G1ConcurrentRefine* cr = new G1ConcurrentRefine(green_zone,
 184                                                   yellow_zone,
 185                                                   red_zone,
 186                                                   min_yellow_zone_size);
 187 
 188   if (cr == NULL) {
 189     *ecode = JNI_ENOMEM;
 190     vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
 191     return NULL;
 192   }
 193 
 194   cr->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, cr->_n_worker_threads, mtGC);
 195   if (cr->_threads == NULL) {
 196     *ecode = JNI_ENOMEM;
 197     vm_shutdown_during_initialization("Could not allocate an array for G1ConcurrentRefineThread");
 198     return NULL;
 199   }
 200 
 201   uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
 202 
 203   G1ConcurrentRefineThread *next = NULL;
 204   for (uint i = cr->_n_worker_threads - 1; i != UINT_MAX; i--) {
 205     Thresholds thresholds = calc_thresholds(green_zone, yellow_zone, i);
 206     G1ConcurrentRefineThread* t =
 207       new G1ConcurrentRefineThread(cr,
 208                                    next,
 209                                    worker_id_offset,
 210                                    i,
 211                                    activation_level(thresholds),
 212                                    deactivation_level(thresholds));
 213     assert(t != NULL, "Conc refine should have been created");
 214     if (t->osthread() == NULL) {
 215       *ecode = JNI_ENOMEM;
 216       vm_shutdown_during_initialization("Could not create G1ConcurrentRefineThread");
 217       return NULL;
 218     }
 219 
 220     assert(t->cr() == cr, "Conc refine thread should refer to this");
 221     cr->_threads[i] = t;
 222     next = t;
 223   }
 224 
 225   *ecode = JNI_OK;
 226   return cr;
 227 }
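The countdown loop above builds the worker chain back to front, so each new thread can be handed a pointer to its already-constructed successor; the i != UINT_MAX test relies on well-defined unsigned wrap-around to stop after the i == 0 iteration. A minimal standalone sketch of the same pattern (illustrative only, not HotSpot code):

  #include <climits>
  #include <cstdio>

  struct Node {
    unsigned id;
    Node*    next;   // successor in the chain (nullptr for the last one)
  };

  int main() {
    const unsigned n = 4;
    Node  nodes[n];
    Node* next = nullptr;
    // Count down over an unsigned index: after the i == 0 iteration, i wraps
    // to UINT_MAX and the loop stops, so node i is always linked to the
    // already-built node i + 1.
    for (unsigned i = n - 1; i != UINT_MAX; i--) {
      nodes[i].id   = i;
      nodes[i].next = next;
      next = &nodes[i];
    }
    for (Node* p = next; p != nullptr; p = p->next) {
      printf("node %u -> %s\n", p->id, p->next != nullptr ? "next" : "end");
    }
    return 0;
  }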
 228 
 229 void G1ConcurrentRefine::stop() {
 230   for (uint i = 0; i < _n_worker_threads; i++) {
 231     _threads[i]->stop();
 232   }
 233 }
 234 
 235 void G1ConcurrentRefine::update_thread_thresholds() {
 236   for (uint i = 0; i < _n_worker_threads; i++) {
 237     Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, i);
 238     _threads[i]->update_thresholds(activation_level(thresholds),
 239                                    deactivation_level(thresholds));
 240   }
 241 }
 242 
 243 G1ConcurrentRefine::~G1ConcurrentRefine() {
 244   for (uint i = 0; i < _n_worker_threads; i++) {
 245     delete _threads[i];
 246   }
 247   FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
 248 }
 249 
 250 void G1ConcurrentRefine::threads_do(ThreadClosure *tc) {
 251   for (uint i = 0; i < _n_worker_threads; i++) {
 252     tc->do_thread(_threads[i]);
 253   }
 254 }
 255 
 256 uint G1ConcurrentRefine::thread_num() {
 257   return G1ConcRefinementThreads;
 258 }
 259 
 260 void G1ConcurrentRefine::print_threads_on(outputStream* st) const {
 261   for (uint i = 0; i < _n_worker_threads; ++i) {
 262     _threads[i]->print_on(st);
 263     st->cr();
 264   }
 265 }
 266 
 267 static size_t calc_new_green_zone(size_t green,
 268                                   double update_rs_time,
 269                                   size_t update_rs_processed_buffers,
 270                                   double goal_ms) {
 271   // Adjust green zone based on whether we're meeting the time goal.
 272   // Limit to max_green_zone.
 273   const double inc_k = 1.1, dec_k = 0.9;
 274   if (update_rs_time > goal_ms) {
 275     if (green > 0) {
 276       green = static_cast<size_t>(green * dec_k);
 277     }
 278   } else if (update_rs_time < goal_ms &&
 279              update_rs_processed_buffers > green) {
 280     green = static_cast<size_t>(MAX2(green * inc_k, green + 1.0));
 281     green = MIN2(green, max_green_zone);
 282   }
 283   return green;
 284 }


 309                                     update_rs_processed_buffers,
 310                                     goal_ms);
 311   _yellow_zone = calc_new_yellow_zone(_green_zone, _min_yellow_zone_size);
 312   _red_zone = calc_new_red_zone(_green_zone, _yellow_zone);
 313 
 314   assert_zone_constraints_gyr(_green_zone, _yellow_zone, _red_zone);
 315   LOG_ZONES("Updated Refinement Zones: "
 316             "green: " SIZE_FORMAT ", "
 317             "yellow: " SIZE_FORMAT ", "
 318             "red: " SIZE_FORMAT,
 319             _green_zone, _yellow_zone, _red_zone);
 320 }
 321 
 322 void G1ConcurrentRefine::adjust(double update_rs_time,
 323                                 size_t update_rs_processed_buffers,
 324                                 double goal_ms) {
 325   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 326 
 327   if (G1UseAdaptiveConcRefinement) {
 328     update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);
 329     update_thread_thresholds();
 330 
 331     // Change the barrier params
 332     if (_n_worker_threads == 0) {
 333       // Disable dcqs notification when there are no threads to notify.
 334       dcqs.set_process_completed_threshold(INT_MAX);
 335     } else {
 336       // Worker 0 is the primary; wakeup is via dcqs notification.
 337       STATIC_ASSERT(max_yellow_zone <= INT_MAX);
 338       size_t activate = _threads[0]->activation_threshold();
 339       dcqs.set_process_completed_threshold((int)activate);
 340     }
 341     dcqs.set_max_completed_queue((int)red_zone());
 342   }
 343 
 344   size_t curr_queue_size = dcqs.completed_buffers_num();
 345   if (curr_queue_size >= yellow_zone()) {
 346     dcqs.set_completed_queue_padding(curr_queue_size);
 347   } else {
 348     dcqs.set_completed_queue_padding(0);
 349   }
 350   dcqs.notify_if_necessary();
 351 }

(End of the original g1ConcurrentRefine.cpp; the patched version, which creates the refinement threads lazily, follows.)

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1ConcurrentRefine.hpp"
  27 #include "gc/g1/g1ConcurrentRefineThread.hpp"
  28 #include "logging/log.hpp"
  29 #include "runtime/java.hpp"
  30 #include "runtime/thread.hpp"
  31 #include "utilities/debug.hpp"
  32 #include "utilities/globalDefinitions.hpp"
  33 #include "utilities/pair.hpp"
  34 #include <math.h>
  35 
  36 G1ConcurrentRefineThread* G1ConcurrentRefineThreadControl::create_refinement_thread(uint worker_id, bool initializing) {
  37   G1ConcurrentRefineThread* result = NULL;
  38   if (initializing || !InjectGCWorkerCreationFailure) {
  39     result = new G1ConcurrentRefineThread(_cr, worker_id);
  40   }
  41   if (result == NULL || result->osthread() == NULL) {
  42     log_warning(gc)("Failed to create refinement thread %u, no more %s",
  43                     worker_id,
  44                     result == NULL ? "memory" : "OS threads");
  45   }
  46   return result;
  47 }
  48 
  49 G1ConcurrentRefineThreadControl::G1ConcurrentRefineThreadControl() :
  50   _cr(NULL),
  51   _threads(NULL),
  52   _num_max_threads(0)
  53 {
  54 }
  55 
  56 G1ConcurrentRefineThreadControl::~G1ConcurrentRefineThreadControl() {
  57   for (uint i = 0; i < _num_max_threads; i++) {
  58     G1ConcurrentRefineThread* t = _threads[i];
  59     if (t != NULL) {
  60       delete t;
  61     }
  62   }
  63   FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
  64 }
  65 
  66 jint G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cr, uint num_max_threads) {
  67   assert(cr != NULL, "G1ConcurrentRefine must not be NULL");
  68   _cr = cr;
  69   _num_max_threads = num_max_threads;
  70 
  71   _threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, num_max_threads, mtGC);
  72   if (_threads == NULL) {
  73     vm_shutdown_during_initialization("Could not allocate thread holder array.");
  74     return JNI_ENOMEM;
  75   }
  76 
  77   for (uint i = 0; i < num_max_threads; i++) {
  78     if (UseDynamicNumberOfGCThreads && i != 0 /* Always start first thread. */) {
  79       _threads[i] = NULL;
  80     } else {
  81       _threads[i] = create_refinement_thread(i, true);
  82       if (_threads[i] == NULL) {
  83         vm_shutdown_during_initialization("Could not allocate refinement threads.");
  84         return JNI_ENOMEM;
  85       }
  86     }
  87   }
  88   return JNI_OK;
  89 }
  90 
  91 void G1ConcurrentRefineThreadControl::maybe_activate_next(uint cur_worker_id) {
  92   assert(cur_worker_id < _num_max_threads,
  93          "Activating another thread from %u not allowed since there can be at most %u",
  94          cur_worker_id, _num_max_threads);
  95   if (cur_worker_id == (_num_max_threads - 1)) {
  96     // Already the last thread, there is no more thread to activate.
  97     return;
  98   }
  99 
 100   uint worker_id = cur_worker_id + 1;
 101   G1ConcurrentRefineThread* thread_to_activate = _threads[worker_id];
 102   if (thread_to_activate == NULL) {
 103     // Still need to create the thread...
 104     _threads[worker_id] = create_refinement_thread(worker_id, false);
 105     thread_to_activate = _threads[worker_id];
 106   }
 107   if (thread_to_activate != NULL) {
 108     thread_to_activate->activate();
 109   }
 110 }
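This is where the lazy initialization introduced by this patch shows up: the slot for cur_worker_id + 1 may still be NULL and is only filled in when the previous worker first tries to activate it, and a failed creation is simply tolerated. A stripped-down standalone sketch of the same create-on-first-activation idea (all names below are illustrative, not HotSpot APIs):

  #include <cstdio>

  struct Worker {
    unsigned id;
    bool     active;
  };

  static const unsigned kMaxWorkers = 4;
  static Worker* workers[kMaxWorkers];     // all slots start out null

  // Lazy creation; a real implementation could fail and return nullptr.
  static Worker* create_worker(unsigned id) {
    return new Worker{id, false};
  }

  static void maybe_activate_next(unsigned cur) {
    if (cur + 1 >= kMaxWorkers) {
      return;                              // already the last worker
    }
    unsigned next = cur + 1;
    if (workers[next] == nullptr) {        // first activation: create on demand
      workers[next] = create_worker(next);
    }
    if (workers[next] != nullptr) {        // creation may fail; tolerate that
      workers[next]->active = true;
    }
  }

  int main() {
    workers[0] = create_worker(0);         // worker 0 is always created eagerly
    workers[0]->active = true;
    maybe_activate_next(0);                // lazily creates and wakes worker 1
    printf("worker 1 %s\n",
           workers[1] != nullptr && workers[1]->active ? "created and active"
                                                       : "not created");
    return 0;
  }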
 111 
 112 void G1ConcurrentRefineThreadControl::print_on(outputStream* st) const {
 113   for (uint i = 0; i < _num_max_threads; ++i) {
 114     if (_threads[i] != NULL) {
 115       _threads[i]->print_on(st);
 116       st->cr();
 117     }
 118   }
 119 }
 120 
 121 void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) {
 122   for (uint i = 0; i < _num_max_threads; i++) {
 123     if (_threads[i] != NULL) {
 124       tc->do_thread(_threads[i]);
 125     }
 126   }
 127 }
 128 
 129 void G1ConcurrentRefineThreadControl::stop() {
 130   for (uint i = 0; i < _num_max_threads; i++) {
 131     if (_threads[i] != NULL) {
 132       _threads[i]->stop();
 133     }
 134   }
 135 }
 136 
 137 // Arbitrary but large limits, to simplify some of the zone calculations.
 138 // The general idea is to allow expressions like
 139 //   MIN2(x OP y, max_XXX_zone)
 140 // without needing to check for overflow in "x OP y", because the
 141 // ranges for x and y have been restricted.
 142 STATIC_ASSERT(sizeof(LP64_ONLY(jint) NOT_LP64(jshort)) <= (sizeof(size_t)/2));
 143 const size_t max_yellow_zone = LP64_ONLY(max_jint) NOT_LP64(max_jshort);
 144 const size_t max_green_zone = max_yellow_zone / 2;
 145 const size_t max_red_zone = INT_MAX; // For dcqs.set_max_completed_queue.
 146 STATIC_ASSERT(max_yellow_zone <= max_red_zone);
 147 
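A small standalone illustration of the overflow argument in the comment above, assuming an LP64 build where each zone cap fits in 32 bits while size_t is 64 bits wide:

  #include <algorithm>
  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  int main() {
    // Stand-ins for the caps: 32-bit values held in a 64-bit size_t.
    const size_t max_yellow = INT32_MAX;   // plays the role of max_yellow_zone
    const size_t max_red    = INT32_MAX;   // plays the role of max_red_zone
    size_t x = max_yellow;                 // worst-case operands
    size_t y = max_yellow;
    // x + y is at most 2 * INT32_MAX, far below the 64-bit size_t limit, so
    // the unclamped sum is exact and MIN2-style clamping is safe.
    size_t clamped = std::min(x + y, max_red);
    assert(clamped == max_red);
    return 0;
  }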
 148 // Range check assertions for green zone values.
 149 #define assert_zone_constraints_g(green)                        \
 150   do {                                                          \
 151     size_t azc_g_green = (green);                               \
 152     assert(azc_g_green <= max_green_zone,                       \
 153            "green exceeds max: " SIZE_FORMAT, azc_g_green);     \
 154   } while (0)
 155 
 156 // Range check assertions for green and yellow zone values.


 180            azc_gyr_yellow, azc_gyr_red);                                \
 181   } while (0)
 182 
 183 // Logging tag sequence for refinement control updates.
 184 #define CTRL_TAGS gc, ergo, refine
 185 
 186 // For logging zone values, ensuring consistency of level and tags.
 187 #define LOG_ZONES(...) log_debug( CTRL_TAGS )(__VA_ARGS__)
 188 
 189 // Package for pair of refinement thread activation and deactivation
 190 // thresholds.  The activation and deactivation levels are resp. the first
 191 // and second values of the pair.
 192 typedef Pair<size_t, size_t> Thresholds;
 193 inline size_t activation_level(const Thresholds& t) { return t.first; }
 194 inline size_t deactivation_level(const Thresholds& t) { return t.second; }
 195 
 196 static Thresholds calc_thresholds(size_t green_zone,
 197                                   size_t yellow_zone,
 198                                   uint worker_i) {
 199   double yellow_size = yellow_zone - green_zone;
 200   double step = yellow_size / G1ConcurrentRefine::max_num_threads();
 201   if (worker_i == 0) {
 202     // Potentially activate worker 0 more aggressively, to keep
 203     // available buffers near green_zone value.  When yellow_size is
 204     // large we don't want to allow a full step to accumulate before
 205     // doing any processing, as that might lead to significantly more
 206     // than green_zone buffers to be processed by update_rs.
 207     step = MIN2(step, ParallelGCThreads / 2.0);
 208   }
 209   size_t activate_offset = static_cast<size_t>(ceil(step * (worker_i + 1)));
 210   size_t deactivate_offset = static_cast<size_t>(floor(step * worker_i));
 211   return Thresholds(green_zone + activate_offset,
 212                     green_zone + deactivate_offset);
 213 }
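To make the threshold ladder concrete, here is a standalone rerun of the arithmetic above with assumed values (green zone 6, yellow zone 26, four refinement threads, ParallelGCThreads = 8); none of these numbers come from the patch itself:

  #include <algorithm>
  #include <cmath>
  #include <cstddef>
  #include <cstdio>

  int main() {
    const size_t green = 6, yellow = 26;
    const unsigned n_threads = 4;
    const double parallel_gc_threads = 8.0;
    for (unsigned worker_i = 0; worker_i < n_threads; worker_i++) {
      double step = double(yellow - green) / n_threads;      // 5.0
      if (worker_i == 0) {
        step = std::min(step, parallel_gc_threads / 2.0);    // clamp worker 0 to 4.0
      }
      size_t activate   = green + size_t(std::ceil(step * (worker_i + 1)));
      size_t deactivate = green + size_t(std::floor(step * worker_i));
      printf("worker %u: activate at %zu buffers, deactivate at %zu\n",
             worker_i, activate, deactivate);
    }
    // Prints 10/6, 16/11, 21/16 and 26/21: each successive worker wakes up
    // (and goes back to sleep) deeper into the yellow zone.
    return 0;
  }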
 214 
 215 G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
 216                                        size_t yellow_zone,
 217                                        size_t red_zone,
 218                                        size_t min_yellow_zone_size) :
 219   _thread_control(),

 220   _green_zone(green_zone),
 221   _yellow_zone(yellow_zone),
 222   _red_zone(red_zone),
 223   _min_yellow_zone_size(min_yellow_zone_size)
 224 {
 225   assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
 226 }
 227 
 228 jint G1ConcurrentRefine::initialize() {
 229   return _thread_control.initialize(this, max_num_threads());
 230 }
 231 
 232 static size_t calc_min_yellow_zone_size() {
 233   size_t step = G1ConcRefinementThresholdStep;
 234   uint n_workers = G1ConcurrentRefine::max_num_threads();
 235   if ((max_yellow_zone / step) < n_workers) {
 236     return max_yellow_zone;
 237   } else {
 238     return step * n_workers;
 239   }
 240 }
 241 
 242 static size_t calc_init_green_zone() {
 243   size_t green = G1ConcRefinementGreenZone;
 244   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
 245     green = ParallelGCThreads;
 246   }
 247   return MIN2(green, max_green_zone);
 248 }
 249 
 250 static size_t calc_init_yellow_zone(size_t green, size_t min_size) {
 251   size_t config = G1ConcRefinementYellowZone;
 252   size_t size = 0;
 253   if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
 254     size = green * 2;


 278   size_t red_zone = calc_init_red_zone(green_zone, yellow_zone);
 279 
 280   LOG_ZONES("Initial Refinement Zones: "
 281             "green: " SIZE_FORMAT ", "
 282             "yellow: " SIZE_FORMAT ", "
 283             "red: " SIZE_FORMAT ", "
 284             "min yellow size: " SIZE_FORMAT,
 285             green_zone, yellow_zone, red_zone, min_yellow_zone_size);
 286 
 287   G1ConcurrentRefine* cr = new G1ConcurrentRefine(green_zone,
 288                                                   yellow_zone,
 289                                                   red_zone,
 290                                                   min_yellow_zone_size);
 291 
 292   if (cr == NULL) {
 293     *ecode = JNI_ENOMEM;
 294     vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
 295     return NULL;
 296   }
 297 
 298   *ecode = cr->initialize();
 299   return cr;
 300 }
 301 
 302 void G1ConcurrentRefine::stop() {
 303   _thread_control.stop();
 304 }
 305 
 306 G1ConcurrentRefine::~G1ConcurrentRefine() {




 307 }
 308 
 309 void G1ConcurrentRefine::threads_do(ThreadClosure *tc) {
 310   _thread_control.worker_threads_do(tc);


 311 }
 312 
 313 uint G1ConcurrentRefine::max_num_threads() {
 314   return G1ConcRefinementThreads;
 315 }
 316 
 317 void G1ConcurrentRefine::print_threads_on(outputStream* st) const {
 318   _thread_control.print_on(st);



 319 }
 320 
 321 static size_t calc_new_green_zone(size_t green,
 322                                   double update_rs_time,
 323                                   size_t update_rs_processed_buffers,
 324                                   double goal_ms) {
 325   // Adjust green zone based on whether we're meeting the time goal.
 326   // Limit to max_green_zone.
 327   const double inc_k = 1.1, dec_k = 0.9;
 328   if (update_rs_time > goal_ms) {
 329     if (green > 0) {
 330       green = static_cast<size_t>(green * dec_k);
 331     }
 332   } else if (update_rs_time < goal_ms &&
 333              update_rs_processed_buffers > green) {
 334     green = static_cast<size_t>(MAX2(green * inc_k, green + 1.0));
 335     green = MIN2(green, max_green_zone);
 336   }
 337   return green;
 338 }
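As a worked example of the controller above (all numbers assumed, not from the patch): with a 10 ms goal and a green zone of 20 buffers, an update_rs phase that takes 12 ms shrinks the zone to 18; a following 8 ms phase that processed more than 18 buffers grows it again to 19.

  #include <algorithm>
  #include <cstddef>
  #include <cstdio>

  int main() {
    const double goal_ms = 10.0, inc_k = 1.1, dec_k = 0.9;
    size_t green = 20;
    double update_rs_time = 12.0;                     // over the goal...
    if (update_rs_time > goal_ms && green > 0) {
      green = static_cast<size_t>(green * dec_k);     // 20 -> 18
    }
    update_rs_time = 8.0;                             // ...then under the goal,
    size_t processed = 25;                            // with > green buffers done
    if (update_rs_time < goal_ms && processed > green) {
      green = static_cast<size_t>(std::max(green * inc_k, green + 1.0));  // 18 -> 19
    }
    printf("green zone after the two adjustments: %zu\n", green);
    return 0;
  }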


 363                                     update_rs_processed_buffers,
 364                                     goal_ms);
 365   _yellow_zone = calc_new_yellow_zone(_green_zone, _min_yellow_zone_size);
 366   _red_zone = calc_new_red_zone(_green_zone, _yellow_zone);
 367 
 368   assert_zone_constraints_gyr(_green_zone, _yellow_zone, _red_zone);
 369   LOG_ZONES("Updated Refinement Zones: "
 370             "green: " SIZE_FORMAT ", "
 371             "yellow: " SIZE_FORMAT ", "
 372             "red: " SIZE_FORMAT,
 373             _green_zone, _yellow_zone, _red_zone);
 374 }
 375 
 376 void G1ConcurrentRefine::adjust(double update_rs_time,
 377                                 size_t update_rs_processed_buffers,
 378                                 double goal_ms) {
 379   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 380 
 381   if (G1UseAdaptiveConcRefinement) {
 382     update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);

 383 
 384     // Change the barrier params
 385     if (max_num_threads() == 0) {
 386       // Disable dcqs notification when there are no threads to notify.
 387       dcqs.set_process_completed_threshold(INT_MAX);
 388     } else {
 389       // Worker 0 is the primary; wakeup is via dcqs notification.
 390       STATIC_ASSERT(max_yellow_zone <= INT_MAX);
 391       size_t activate = activation_threshold(0);
 392       dcqs.set_process_completed_threshold((int)activate);
 393     }
 394     dcqs.set_max_completed_queue((int)red_zone());
 395   }
 396 
 397   size_t curr_queue_size = dcqs.completed_buffers_num();
 398   if (curr_queue_size >= yellow_zone()) {
 399     dcqs.set_completed_queue_padding(curr_queue_size);
 400   } else {
 401     dcqs.set_completed_queue_padding(0);
 402   }
 403   dcqs.notify_if_necessary();
 404 }
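A worked pass through adjust(), reusing the assumed numbers from the calc_thresholds example (green 6, yellow 26, four threads) plus an assumed red zone of 50: with adaptive refinement enabled the zones are recomputed, the dirty-card queue set is told to notify worker 0 once the completed-buffer count reaches its activation threshold (10 in that example), and the completed-buffer queue is capped at the red zone (50). If the current backlog is at or above the yellow zone, the cap is padded by the backlog size; otherwise the padding is reset to zero.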
 405 
 406 size_t G1ConcurrentRefine::activation_threshold(uint worker_id) const {
 407   Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id);
 408   return activation_level(thresholds);
 409 }
 410 
 411 size_t G1ConcurrentRefine::deactivation_threshold(uint worker_id) const {
 412   Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id);
 413   return deactivation_level(thresholds);
 414 }
 415 
 416 uint G1ConcurrentRefine::worker_id_offset() {
 417   return DirtyCardQueueSet::num_par_ids();
 418 }
 419 
 420 void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers) {
 421   if (activation_threshold(worker_id + 1) > num_cur_buffers) {
 422     _thread_control.maybe_activate_next(worker_id);
 423   }
 424 }
 425 
 426 bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
 427   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 428 
 429   size_t curr_buffer_num = dcqs.completed_buffers_num();
 430   // If the number of the buffers falls down into the yellow zone,
 431   // that means that the transition period after the evacuation pause has ended.
 432   // Since the value written to the DCQS is the same for all threads, there is no
 433   // need to synchronize.
 434   if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= yellow_zone()) {
 435     dcqs.set_completed_queue_padding(0);
 436   }
 437 
 438   maybe_activate_more_threads(worker_id, curr_buffer_num);
 439 
 440   // Process the next buffer, if there are enough left.
 441   return dcqs.refine_completed_buffer_concurrently(worker_id + worker_id_offset(),
 442                                                    deactivation_threshold(worker_id));
 443 }
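For context, a rough, hypothetical sketch of how a refinement worker might drive do_refinement_step(); the real loop lives in G1ConcurrentRefineThread, which is not part of this file, and every helper name below is a stand-in:

  #include <cstdio>

  // Hypothetical stand-ins for the thread-side plumbing (not HotSpot APIs).
  static bool should_terminate();
  static void wait_until_activated();
  static bool do_refinement_step(unsigned worker_id);
  static void deactivate();

  static void run_worker(unsigned worker_id) {
    while (!should_terminate()) {
      wait_until_activated();             // parked until the activation threshold is hit
      while (do_refinement_step(worker_id)) {
        // Keep refining; the step is expected to return false once the backlog
        // has dropped to this worker's deactivation threshold (or is empty).
      }
      deactivate();                       // park again until re-activated
    }
  }

  // Trivial stubs so the sketch compiles and terminates immediately.
  static int steps = 2;
  static bool should_terminate()           { return steps-- <= 0; }
  static void wait_until_activated()       { }
  static bool do_refinement_step(unsigned) { return false; }
  static void deactivate()                 { }

  int main() { run_worker(0); printf("done\n"); return 0; }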