
src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp

rev 47675 : [mq]: 8149127-rename-concurrentrefine-a
rev 47676 : imported patch 8149127-rename-concurrentrefine-b
rev 47677 : [mq]: 8149127-rename-concurrentrefine-b-stefanj-review
rev 47678 : imported patch 8140255-move-sampling-thread
rev 47679 : [mq]: 8140255-update
rev 47680 : [mq]: 8190426-lazy-init-refinement-threads

*** 31,40 **** --- 31,113 ----
  #include "utilities/debug.hpp"
  #include "utilities/globalDefinitions.hpp"
  #include "utilities/pair.hpp"
  
  #include <math.h>
  
+ G1ConcurrentRefineThreadControl::G1ConcurrentRefineThreadControl() :
+   _cg1r(NULL),
+   _threads(NULL),
+   _num_max_threads(0)
+ {
+ }
+ 
+ G1ConcurrentRefineThreadControl::~G1ConcurrentRefineThreadControl() {
+   for (uint i = 0; i < _num_max_threads; i++) {
+     G1ConcurrentRefineThread* t = _threads[i];
+     if (t != NULL) {
+       delete t;
+     }
+   }
+   FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
+ }
+ 
+ void G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cg1r, uint num_max_threads) {
+   assert(cg1r != NULL, "Passed g1ConcurrentRefine must not be NULL");
+   _cg1r = cg1r;
+   _num_max_threads = num_max_threads;
+ 
+   _threads = NEW_C_HEAP_ARRAY(G1ConcurrentRefineThread*, num_max_threads, mtGC);
+   for (uint i = 0; i < num_max_threads; i++) {
+     if (UseDynamicNumberOfGCThreads) {
+       _threads[i] = NULL;
+     } else {
+       _threads[i] = new G1ConcurrentRefineThread(_cg1r, i);
+     }
+   }
+ }
+ 
+ void G1ConcurrentRefineThreadControl::maybe_activate_next(uint cur_worker_id) {
+   assert(cur_worker_id < _num_max_threads, "Tried to activate from impossible thread %u", cur_worker_id);
+   if (cur_worker_id == (_num_max_threads - 1)) {
+     // Already the last thread, there is no more thread to activate.
+     return;
+   }
+ 
+   uint worker_id = cur_worker_id + 1;
+   G1ConcurrentRefineThread* thread_to_activate = _threads[worker_id];
+   if (thread_to_activate == NULL) {
+     // Still need to create the thread...
+     _threads[worker_id] = new G1ConcurrentRefineThread(_cg1r, worker_id);
+     thread_to_activate = _threads[worker_id];
+   }
+   thread_to_activate->activate();
+ }
+ 
+ void G1ConcurrentRefineThreadControl::print_on(outputStream* st) const {
+   for (uint i = 0; i < _num_max_threads; ++i) {
+     if (_threads[i] != NULL) {
+       _threads[i]->print_on(st);
+       st->cr();
+     }
+   }
+ }
+ 
+ void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) {
+   for (uint i = 0; i < _num_max_threads; i++) {
+     if (_threads[i] != NULL) {
+       tc->do_thread(_threads[i]);
+     }
+   }
+ }
+ 
+ void G1ConcurrentRefineThreadControl::stop() {
+   for (uint i = 0; i < _num_max_threads; i++) {
+     if (_threads[i] != NULL) {
+       _threads[i]->stop();
+     }
+   }
+ }
+ 
  // Arbitrary but large limits, to simplify some of the zone calculations.
  // The general idea is to allow expressions like
  //   MIN2(x OP y, max_XXX_zone)
  // without needing to check for overflow in "x OP y", because the
  // ranges for x and y have been restricted.
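The hunk above introduces G1ConcurrentRefineThreadControl, which owns the worker array and, under UseDynamicNumberOfGCThreads, leaves slots NULL until maybe_activate_next() first needs them. A minimal standalone sketch of this lazy-creation pattern (plain C++; Worker and WorkerControl are hypothetical stand-ins for G1ConcurrentRefineThread and the new control class, with new/delete in place of the C-heap macros):

    #include <cassert>
    #include <cstddef>

    struct Worker {
      unsigned id;
      explicit Worker(unsigned i) : id(i) {}
      void activate() { /* wake the worker; omitted */ }
    };

    class WorkerControl {
      Worker** _workers;   // fixed-size slot array; NULL means "not created yet"
      unsigned _max;
    public:
      WorkerControl(unsigned max, bool lazy) : _workers(new Worker*[max]()), _max(max) {
        if (!lazy) {
          for (unsigned i = 0; i < _max; i++) {  // eager mode: create all up front
            _workers[i] = new Worker(i);
          }
        }
      }
      ~WorkerControl() {
        for (unsigned i = 0; i < _max; i++) {
          delete _workers[i];                    // deleting NULL is a no-op
        }
        delete[] _workers;
      }
      void maybe_activate_next(unsigned cur) {
        assert(cur < _max && "impossible worker id");
        if (cur == _max - 1) return;             // already the last worker
        unsigned next = cur + 1;
        if (_workers[next] == NULL) {
          _workers[next] = new Worker(next);     // first use: create on demand
        }
        _workers[next]->activate();
      }
    };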
*** 94,104 ****
  static Thresholds calc_thresholds(size_t green_zone,
                                    size_t yellow_zone,
                                    uint worker_i) {
    double yellow_size = yellow_zone - green_zone;
!   double step = yellow_size / G1ConcurrentRefine::thread_num();
    if (worker_i == 0) {
      // Potentially activate worker 0 more aggressively, to keep
      // available buffers near green_zone value.  When yellow_size is
      // large we don't want to allow a full step to accumulate before
      // doing any processing, as that might lead to significantly more
--- 167,177 ----
  static Thresholds calc_thresholds(size_t green_zone,
                                    size_t yellow_zone,
                                    uint worker_i) {
    double yellow_size = yellow_zone - green_zone;
!   double step = yellow_size / G1ConcurrentRefine::max_num_threads();
    if (worker_i == 0) {
      // Potentially activate worker 0 more aggressively, to keep
      // available buffers near green_zone value.  When yellow_size is
      // large we don't want to allow a full step to accumulate before
      // doing any processing, as that might lead to significantly more
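Renaming thread_num() to max_num_threads() makes explicit that the step divides the yellow range by the maximum possible worker count, not the number currently running. A worked example with assumed zone values (green = 100, yellow = 300 buffers, 4 max threads; the more aggressive worker-0 special case from the comment is omitted):

    #include <cstdio>

    int main() {
      const double green_zone  = 100.0;  // assumed values, for illustration only
      const double yellow_zone = 300.0;
      const unsigned max_num_threads = 4;

      double step = (yellow_zone - green_zone) / max_num_threads;  // 50 buffers
      for (unsigned i = 0; i < max_num_threads; i++) {
        // One step boundary per potential worker: the deeper the backlog gets
        // into the yellow range, the more workers become eligible to run.
        printf("worker %u: boundary at %.0f buffers\n", i, green_zone + step * (i + 1));
      }
      return 0;
    }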
*** 113,135 ****
  G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
                                         size_t yellow_zone,
                                         size_t red_zone,
                                         size_t min_yellow_zone_size) :
!   _threads(NULL),
!   _n_worker_threads(thread_num()),
    _green_zone(green_zone),
    _yellow_zone(yellow_zone),
    _red_zone(red_zone),
    _min_yellow_zone_size(min_yellow_zone_size)
  {
    assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
  }
  
  static size_t calc_min_yellow_zone_size() {
    size_t step = G1ConcRefinementThresholdStep;
!   uint n_workers = G1ConcurrentRefine::thread_num();
    if ((max_yellow_zone / step) < n_workers) {
      return max_yellow_zone;
    } else {
      return step * n_workers;
    }
--- 186,208 ----
  G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
                                         size_t yellow_zone,
                                         size_t red_zone,
                                         size_t min_yellow_zone_size) :
!   _thread_control(),
    _green_zone(green_zone),
    _yellow_zone(yellow_zone),
    _red_zone(red_zone),
    _min_yellow_zone_size(min_yellow_zone_size)
  {
    assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
+   _thread_control.initialize(this, max_num_threads());
  }
  
  static size_t calc_min_yellow_zone_size() {
    size_t step = G1ConcRefinementThresholdStep;
!   uint n_workers = G1ConcurrentRefine::max_num_threads();
    if ((max_yellow_zone / step) < n_workers) {
      return max_yellow_zone;
    } else {
      return step * n_workers;
    }
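calc_min_yellow_zone_size() reserves one G1ConcRefinementThresholdStep per potential worker, capped at max_yellow_zone; comparing by division first avoids overflow in step * n_workers. A small sketch with assumed constants:

    #include <cstddef>
    #include <cstdio>

    static const size_t max_yellow_zone = (size_t)1 << 30;  // assumed cap
    static const size_t step            = 2;                // assumed threshold step

    static size_t calc_min_yellow_zone_size(unsigned n_workers) {
      // Compare via division first so step * n_workers cannot overflow.
      if ((max_yellow_zone / step) < n_workers) {
        return max_yellow_zone;
      } else {
        return step * n_workers;
      }
    }

    int main() {
      printf("%zu\n", calc_min_yellow_zone_size(8));  // 16: one step per worker
      return 0;
    }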
*** 189,269 ****
      *ecode = JNI_ENOMEM;
      vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
      return NULL;
    }
  
-   cr->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, cr->_n_worker_threads, mtGC);
-   if (cr->_threads == NULL) {
-     *ecode = JNI_ENOMEM;
-     vm_shutdown_during_initialization("Could not allocate an array for G1ConcurrentRefineThread");
-     return NULL;
-   }
- 
-   uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
- 
-   G1ConcurrentRefineThread *next = NULL;
-   for (uint i = cr->_n_worker_threads - 1; i != UINT_MAX; i--) {
-     Thresholds thresholds = calc_thresholds(green_zone, yellow_zone, i);
-     G1ConcurrentRefineThread* t =
-       new G1ConcurrentRefineThread(cr,
-                                    next,
-                                    worker_id_offset,
-                                    i,
-                                    activation_level(thresholds),
-                                    deactivation_level(thresholds));
-     assert(t != NULL, "Conc refine should have been created");
-     if (t->osthread() == NULL) {
-       *ecode = JNI_ENOMEM;
-       vm_shutdown_during_initialization("Could not create G1ConcurrentRefineThread");
-       return NULL;
-     }
- 
-     assert(t->cr() == cr, "Conc refine thread should refer to this");
-     cr->_threads[i] = t;
-     next = t;
-   }
- 
    *ecode = JNI_OK;
    return cr;
  }
  
  void G1ConcurrentRefine::stop() {
!   for (uint i = 0; i < _n_worker_threads; i++) {
!     _threads[i]->stop();
!   }
! }
! 
! void G1ConcurrentRefine::update_thread_thresholds() {
!   for (uint i = 0; i < _n_worker_threads; i++) {
!     Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, i);
!     _threads[i]->update_thresholds(activation_level(thresholds),
!                                    deactivation_level(thresholds));
!   }
  }
  
  G1ConcurrentRefine::~G1ConcurrentRefine() {
-   for (uint i = 0; i < _n_worker_threads; i++) {
-     delete _threads[i];
-   }
-   FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
  }
  
  void G1ConcurrentRefine::threads_do(ThreadClosure *tc) {
!   for (uint i = 0; i < _n_worker_threads; i++) {
!     tc->do_thread(_threads[i]);
!   }
  }
  
! uint G1ConcurrentRefine::thread_num() {
    return G1ConcRefinementThreads;
  }
  
  void G1ConcurrentRefine::print_threads_on(outputStream* st) const {
!   for (uint i = 0; i < _n_worker_threads; ++i) {
!     _threads[i]->print_on(st);
!     st->cr();
!   }
  }
  
  static size_t calc_new_green_zone(size_t green,
                                    double update_rs_time,
                                    size_t update_rs_processed_buffers,
--- 262,292 ----
      *ecode = JNI_ENOMEM;
      vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
      return NULL;
    }
  
    *ecode = JNI_OK;
    return cr;
  }
  
  void G1ConcurrentRefine::stop() {
!   _thread_control.stop();
  }
  
  G1ConcurrentRefine::~G1ConcurrentRefine() {
  }
  
  void G1ConcurrentRefine::threads_do(ThreadClosure *tc) {
!   _thread_control.worker_threads_do(tc);
  }
  
! uint G1ConcurrentRefine::max_num_threads() {
    return G1ConcRefinementThreads;
  }
  
  void G1ConcurrentRefine::print_threads_on(outputStream* st) const {
!   _thread_control.print_on(st);
  }
  
  static size_t calc_new_green_zone(size_t green,
                                    double update_rs_time,
                                    size_t update_rs_processed_buffers,
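With thread construction deferred to first activation, create() keeps a single failure path and the lifecycle methods shrink to one-line delegations to the control object. A hedged sketch of the simplified factory shape (Refine and ThreadControl are hypothetical stand-ins; the JNI error codes are hard-coded for illustration):

    #include <cstddef>
    #include <new>

    struct ThreadControl {
      void stop() { /* forward stop() to each created worker; omitted */ }
    };

    struct Refine {
      ThreadControl _thread_control;
      void stop() { _thread_control.stop(); }  // facade delegates to the owner
    };

    // Only allocation of the refinement object itself can fail now; the
    // per-thread JNI_ENOMEM exits disappear along with the eager thread loop.
    static Refine* create(int* ecode) {
      Refine* cr = new (std::nothrow) Refine();
      if (cr == NULL) {
        *ecode = -4;  // JNI_ENOMEM
        return NULL;
      }
      *ecode = 0;     // JNI_OK
      return cr;
    }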
*** 324,343 ****
                                         double goal_ms) {
    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  
    if (G1UseAdaptiveConcRefinement) {
      update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);
-     update_thread_thresholds();
  
      // Change the barrier params
!     if (_n_worker_threads == 0) {
        // Disable dcqs notification when there are no threads to notify.
        dcqs.set_process_completed_threshold(INT_MAX);
      } else {
        // Worker 0 is the primary; wakeup is via dcqs notification.
        STATIC_ASSERT(max_yellow_zone <= INT_MAX);
!       size_t activate = _threads[0]->activation_threshold();
        dcqs.set_process_completed_threshold((int)activate);
      }
      dcqs.set_max_completed_queue((int)red_zone());
    }
--- 347,365 ----
                                         double goal_ms) {
    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  
    if (G1UseAdaptiveConcRefinement) {
      update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);
  
      // Change the barrier params
!     if (max_num_threads() == 0) {
        // Disable dcqs notification when there are no threads to notify.
        dcqs.set_process_completed_threshold(INT_MAX);
      } else {
        // Worker 0 is the primary; wakeup is via dcqs notification.
        STATIC_ASSERT(max_yellow_zone <= INT_MAX);
!       size_t activate = activation_threshold(0);
        dcqs.set_process_completed_threshold((int)activate);
      }
      dcqs.set_max_completed_queue((int)red_zone());
    }
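Because per-thread cached thresholds are gone, adjust() now asks the refinement object for worker 0's activation threshold each time the zones are recomputed, rather than reading a stored value back from the thread. A sketch of the wakeup wiring, assuming a mock queue set in place of DirtyCardQueueSet:

    #include <climits>
    #include <cstddef>

    struct MockDirtyCardQueueSet {
      int threshold;
      void set_process_completed_threshold(int t) { threshold = t; }
    };

    // No workers: never notify (INT_MAX). Otherwise worker 0's activation
    // threshold decides when mutators wake the primary refinement thread.
    static void wire_notification(MockDirtyCardQueueSet& dcqs,
                                  unsigned max_num_threads,
                                  size_t activation_threshold_of_worker0) {
      if (max_num_threads == 0) {
        dcqs.set_process_completed_threshold(INT_MAX);
      } else {
        dcqs.set_process_completed_threshold((int)activation_threshold_of_worker0);
      }
    }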
*** 347,351 **** --- 369,412 ----
    } else {
      dcqs.set_completed_queue_padding(0);
    }
    dcqs.notify_if_necessary();
  }
+ 
+ size_t G1ConcurrentRefine::activation_threshold(uint worker_id) const {
+   Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id);
+   return activation_level(thresholds);
+ }
+ 
+ size_t G1ConcurrentRefine::deactivation_threshold(uint worker_id) const {
+   Thresholds thresholds = calc_thresholds(_green_zone, _yellow_zone, worker_id);
+   return deactivation_level(thresholds);
+ }
+ 
+ uint G1ConcurrentRefine::worker_id_offset() {
+   return DirtyCardQueueSet::num_par_ids();
+ }
+ 
+ void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers) {
+   if (num_cur_buffers > activation_threshold(worker_id + 1)) {
+     _thread_control.maybe_activate_next(worker_id);
+   }
+ }
+ 
+ bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
+   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ 
+   size_t curr_buffer_num = dcqs.completed_buffers_num();
+   // If the number of the buffers falls down into the yellow zone,
+   // that means that the transition period after the evacuation pause has ended.
+   // Since the value written to the DCQS is the same for all threads, there is no
+   // need to synchronize.
+   if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= yellow_zone()) {
+     dcqs.set_completed_queue_padding(0);
+   }
+ 
+   maybe_activate_more_threads(worker_id, curr_buffer_num);
+ 
+   // Process the next buffer, if there are enough left.
+   return dcqs.refine_completed_buffer_concurrently(worker_id + worker_id_offset(),
+                                                    deactivation_threshold(worker_id));
+ }
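do_refinement_step() ties the scheme together: read the global buffer count once, clear post-pause padding when the count is back inside the yellow zone, possibly spin up the next worker, then refine one buffer, returning false once the backlog is at or below this worker's deactivation threshold. A self-contained single-threaded sketch of that control flow (refine_one_buffer and the threshold formulas are assumptions standing in for the DCQS calls):

    #include <cstddef>
    #include <cstdio>

    static size_t g_completed_buffers = 7;   // pretend queue depth

    // Mock of refine_completed_buffer_concurrently: process one buffer
    // unless we are already at or below the stop threshold.
    static bool refine_one_buffer(size_t stop_at) {
      if (g_completed_buffers <= stop_at) {
        return false;                        // deactivate this worker
      }
      g_completed_buffers--;                 // "refined" one buffer
      return true;
    }

    static size_t activation_threshold(unsigned worker_id)   { return 2 + 2 * worker_id; }
    static size_t deactivation_threshold(unsigned worker_id) { return 1 + 2 * worker_id; }

    static bool do_refinement_step(unsigned worker_id) {
      size_t curr = g_completed_buffers;
      // Activate the next worker once the backlog passes its threshold.
      if (curr > activation_threshold(worker_id + 1)) {
        printf("worker %u: would activate worker %u\n", worker_id, worker_id + 1);
      }
      return refine_one_buffer(deactivation_threshold(worker_id));
    }

    int main() {
      while (do_refinement_step(0)) { /* worker 0 runs until its threshold */ }
      printf("remaining buffers: %lu\n", (unsigned long)g_completed_buffers);
      return 0;
    }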