< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp

Print this page
rev 57544 : 8236485: Work-in-progress: Epoch synchronization protocol for G1 concurrent refinement
Reviewed-by:


 204   double step = yellow_size / G1ConcurrentRefine::max_num_threads();
 205   if (worker_id == 0) {
 206     // Potentially activate worker 0 more aggressively, to keep
 207     // available buffers near green_zone value.  When yellow_size is
 208     // large we don't want to allow a full step to accumulate before
 209     // doing any processing, as that might lead to significantly more
 210     // than green_zone buffers to be processed during scanning.
 211     step = MIN2(step, ParallelGCThreads / 2.0);
 212   }
 213   size_t activate_offset = static_cast<size_t>(ceil(step * (worker_id + 1)));
 214   size_t deactivate_offset = static_cast<size_t>(floor(step * worker_id));
 215   return Thresholds(green_zone + activate_offset,
 216                     green_zone + deactivate_offset);
 217 }
 218 
 // Constructor: store the refinement zone thresholds and default-construct
 // the refinement thread controller.  The zone values appear to be in
 // cards (see buffers_to_cards below) -- TODO confirm against callers.
 // The assert delegates the green <= yellow <= red ordering check to
 // assert_zone_constraints_gyr (defined outside this view).
 // min_yellow_zone_size is only recorded here; presumably used later
 // when zones are recomputed -- verify at the use sites.
 219 G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
 220                                        size_t yellow_zone,
 221                                        size_t red_zone,
 222                                        size_t min_yellow_zone_size) :
 223   _thread_control(),

 224   _green_zone(green_zone),
 225   _yellow_zone(yellow_zone),
 226   _red_zone(red_zone),
 227   _min_yellow_zone_size(min_yellow_zone_size)
 228 {
 229   assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
 230 }
 231 
 // Two-phase initialization: create up to max_num_threads() refinement
 // worker threads via the thread controller.  Returns the jint status
 // propagated from _thread_control.initialize (presumably a JNI-style
 // code such as JNI_OK -- confirm in G1ConcurrentRefineThreadControl).
 232 jint G1ConcurrentRefine::initialize() {
 233   return _thread_control.initialize(this, max_num_threads());
 234 }
 235 
 // Convert a count of update buffers to a count of cards; each buffer
 // holds G1UpdateBufferSize entries.  NOTE(review): no overflow guard on
 // the multiplication -- callers presumably pass small buffer counts.
 236 static size_t buffers_to_cards(size_t value) {
 237   return value * G1UpdateBufferSize;
 238 }
 239 
 240 static size_t calc_min_yellow_zone_size() {
 241   size_t step = buffers_to_cards(G1ConcRefinementThresholdStep);
 242   uint n_workers = G1ConcurrentRefine::max_num_threads();
 243   if ((max_yellow_zone / step) < n_workers) {




 204   double step = yellow_size / G1ConcurrentRefine::max_num_threads();
 205   if (worker_id == 0) {
 206     // Potentially activate worker 0 more aggressively, to keep
 207     // available buffers near green_zone value.  When yellow_size is
 208     // large we don't want to allow a full step to accumulate before
 209     // doing any processing, as that might lead to significantly more
 210     // than green_zone buffers to be processed during scanning.
 211     step = MIN2(step, ParallelGCThreads / 2.0);
 212   }
 213   size_t activate_offset = static_cast<size_t>(ceil(step * (worker_id + 1)));
 214   size_t deactivate_offset = static_cast<size_t>(floor(step * worker_id));
 215   return Thresholds(green_zone + activate_offset,
 216                     green_zone + deactivate_offset);
 217 }
 218 
 // Constructor: store the refinement zone thresholds and default-construct
 // the thread controller and the synchronizer counters (the counters are
 // new in this change; per the changeset title they support the epoch
 // synchronization protocol -- details not visible here).  Zone values
 // appear to be in cards (see buffers_to_cards below) -- TODO confirm.
 // The assert delegates the green <= yellow <= red ordering check to
 // assert_zone_constraints_gyr (defined outside this view).
 219 G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
 220                                        size_t yellow_zone,
 221                                        size_t red_zone,
 222                                        size_t min_yellow_zone_size) :
 223   _thread_control(),
 224   _synchronizer_counters(),
 225   _green_zone(green_zone),
 226   _yellow_zone(yellow_zone),
 227   _red_zone(red_zone),
 228   _min_yellow_zone_size(min_yellow_zone_size)
 229 {
 230   assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
 231 }
 232 
 // Two-phase initialization: create up to max_num_threads() refinement
 // worker threads via the thread controller.  Returns the jint status
 // propagated from _thread_control.initialize (presumably a JNI-style
 // code such as JNI_OK -- confirm in G1ConcurrentRefineThreadControl).
 233 jint G1ConcurrentRefine::initialize() {
 234   return _thread_control.initialize(this, max_num_threads());
 235 }
 236 
 // Convert a count of update buffers to a count of cards; each buffer
 // holds G1UpdateBufferSize entries.  NOTE(review): no overflow guard on
 // the multiplication -- callers presumably pass small buffer counts.
 237 static size_t buffers_to_cards(size_t value) {
 238   return value * G1UpdateBufferSize;
 239 }
 240 
 241 static size_t calc_min_yellow_zone_size() {
 242   size_t step = buffers_to_cards(G1ConcRefinementThresholdStep);
 243   uint n_workers = G1ConcurrentRefine::max_num_threads();
 244   if ((max_yellow_zone / step) < n_workers) {


< prev index next >