
src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp

rev 47863 : imported patch 8190426-lazy-init-refinement-threads
rev 47864 : [mq]: 8190426-sangheon-review

*** 31,42 ****
  #include "utilities/debug.hpp"
  #include "utilities/globalDefinitions.hpp"
  #include "utilities/pair.hpp"
  #include <math.h>
  
  G1ConcurrentRefineThreadControl::G1ConcurrentRefineThreadControl() :
!   _cg1r(NULL),
    _threads(NULL),
    _num_max_threads(0)
  {
  }
  
--- 31,55 ----
  #include "utilities/debug.hpp"
  #include "utilities/globalDefinitions.hpp"
  #include "utilities/pair.hpp"
  #include <math.h>
  
+ G1ConcurrentRefineThread* G1ConcurrentRefineThreadControl::create_refinement_thread(uint worker_id, bool initializing) {
+   G1ConcurrentRefineThread* result = NULL;
+   if (initializing || !InjectGCWorkerCreationFailure) {
+     result = new G1ConcurrentRefineThread(_cr, worker_id);
+   }
+   if (result == NULL || result->osthread() == NULL) {
+     log_warning(gc)("Failed to create refinement thread %u, no more %s",
+                     worker_id,
+                     result == NULL ? "memory" : "OS threads");
+   }
+   return result;
+ }
+ 
  G1ConcurrentRefineThreadControl::G1ConcurrentRefineThreadControl() :
!   _cr(NULL),
    _threads(NULL),
    _num_max_threads(0)
  {
  }
  
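Editor's note (not part of the webrev): in create_refinement_thread() above, the initializing flag bypasses InjectGCWorkerCreationFailure, so the threads created up front during initialization are always attempted and only lazily created threads can be made to fail for testing. A minimal standalone sketch of that guard, using a hypothetical helper name:

    // Sketch only; factors out the condition used in create_refinement_thread().
    // 'initializing' is true for threads created during initialize(),
    // false for threads created lazily from maybe_activate_next().
    static bool should_attempt_creation(bool initializing, bool inject_failure) {
      // During startup the injection flag is ignored; otherwise it may force
      // a simulated creation failure so the lazy path's NULL handling is exercised.
      return initializing || !inject_failure;
    }
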
*** 48,86 ****
      }
    }
    FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
  }
  
! void G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cg1r, uint num_max_threads) {
!   assert(cg1r != NULL, "Passed g1ConcurrentRefine must not be NULL");
!   _cg1r = cg1r;
    _num_max_threads = num_max_threads;
!   _threads = NEW_C_HEAP_ARRAY(G1ConcurrentRefineThread*, num_max_threads, mtGC);
    for (uint i = 0; i < num_max_threads; i++) {
!     if (UseDynamicNumberOfGCThreads) {
        _threads[i] = NULL;
      } else {
!       _threads[i] = new G1ConcurrentRefineThread(_cg1r, i);
      }
    }
  }
  
  void G1ConcurrentRefineThreadControl::maybe_activate_next(uint cur_worker_id) {
!   assert(cur_worker_id < _num_max_threads, "Tried to activate from impossible thread %u", cur_worker_id);
    if (cur_worker_id == (_num_max_threads - 1)) {
      // Already the last thread, there is no more thread to activate.
      return;
    }
  
    uint worker_id = cur_worker_id + 1;
    G1ConcurrentRefineThread* thread_to_activate = _threads[worker_id];
    if (thread_to_activate == NULL) {
      // Still need to create the thread...
!     _threads[worker_id] = new G1ConcurrentRefineThread(_cg1r, worker_id);
      thread_to_activate = _threads[worker_id];
    }
    thread_to_activate->activate();
  }
  
  void G1ConcurrentRefineThreadControl::print_on(outputStream* st) const {
    for (uint i = 0; i < _num_max_threads; ++i) {
      if (_threads[i] != NULL) {
--- 61,114 ----
      }
    }
    FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
  }
  
! jint G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cr, uint num_max_threads) {
!   assert(cr != NULL, "G1ConcurrentRefine must not be NULL");
!   _cr = cr;
    _num_max_threads = num_max_threads;
! 
!   _threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, num_max_threads, mtGC);
!   if (_threads == NULL) {
!     vm_shutdown_during_initialization("Could not allocate thread holder array.");
!     return JNI_ENOMEM;
!   }
! 
    for (uint i = 0; i < num_max_threads; i++) {
!     if (UseDynamicNumberOfGCThreads && i != 0 /* Always start first thread. */) {
        _threads[i] = NULL;
      } else {
!       _threads[i] = create_refinement_thread(i, true);
!       if (_threads[i] == NULL) {
!         vm_shutdown_during_initialization("Could not allocate refinement threads.");
!         return JNI_ENOMEM;
!       }
      }
    }
+   return JNI_OK;
  }
  
  void G1ConcurrentRefineThreadControl::maybe_activate_next(uint cur_worker_id) {
!   assert(cur_worker_id < _num_max_threads,
!          "Activating another thread from %u not allowed since there can be at most %u",
!          cur_worker_id, _num_max_threads);
    if (cur_worker_id == (_num_max_threads - 1)) {
      // Already the last thread, there is no more thread to activate.
      return;
    }
  
    uint worker_id = cur_worker_id + 1;
    G1ConcurrentRefineThread* thread_to_activate = _threads[worker_id];
    if (thread_to_activate == NULL) {
      // Still need to create the thread...
!     _threads[worker_id] = create_refinement_thread(worker_id, false);
      thread_to_activate = _threads[worker_id];
    }
+   if (thread_to_activate != NULL) {
    thread_to_activate->activate();
+   }
  }
  
  void G1ConcurrentRefineThreadControl::print_on(outputStream* st) const {
    for (uint i = 0; i < _num_max_threads; ++i) {
      if (_threads[i] != NULL) {
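Editor's note (not part of the webrev): after this change, only the first refinement thread is created eagerly when UseDynamicNumberOfGCThreads is on; the rest are created on demand as maybe_activate_next() walks forward, and a failed creation leaves the slot NULL so activation is skipped and retried on a later call. A compressed illustration of that control flow in plain C++, with the HotSpot types replaced by placeholders:

    #include <cstddef>
    #include <new>

    struct Worker { void activate() {} };            // stands in for G1ConcurrentRefineThread

    struct ThreadControl {                           // stands in for G1ConcurrentRefineThreadControl
      Worker** _threads;                             // slots stay NULL until first needed
      unsigned _num_max_threads;

      Worker* create_worker(unsigned /*id*/) {
        return new (std::nothrow) Worker();          // may return NULL on failure
      }

      void maybe_activate_next(unsigned cur) {
        if (cur == _num_max_threads - 1) {
          return;                                    // already the last slot
        }
        unsigned next = cur + 1;
        if (_threads[next] == NULL) {
          _threads[next] = create_worker(next);      // lazy creation, may fail
        }
        if (_threads[next] != NULL) {                // tolerate failure; retried next time
          _threads[next]->activate();
        }
      }
    };
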
*** 193,203 ****
    _yellow_zone(yellow_zone),
    _red_zone(red_zone),
    _min_yellow_zone_size(min_yellow_zone_size)
  {
    assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
!   _thread_control.initialize(this, max_num_threads());
  }
  
  static size_t calc_min_yellow_zone_size() {
    size_t step = G1ConcRefinementThresholdStep;
    uint n_workers = G1ConcurrentRefine::max_num_threads();
--- 221,234 ----
    _yellow_zone(yellow_zone),
    _red_zone(red_zone),
    _min_yellow_zone_size(min_yellow_zone_size)
  {
    assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
! }
! 
! jint G1ConcurrentRefine::initialize() {
!   return _thread_control.initialize(this, max_num_threads());
  }
  
  static size_t calc_min_yellow_zone_size() {
    size_t step = G1ConcRefinementThresholdStep;
    uint n_workers = G1ConcurrentRefine::max_num_threads();
*** 262,272 ****
      *ecode = JNI_ENOMEM;
      vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
      return NULL;
    }
  
!   *ecode = JNI_OK;
    return cr;
  }
  
  void G1ConcurrentRefine::stop() {
    _thread_control.stop();
--- 293,303 ----
      *ecode = JNI_ENOMEM;
      vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
      return NULL;
    }
  
!   *ecode = cr->initialize();
    return cr;
  }
  
  void G1ConcurrentRefine::stop() {
    _thread_control.stop();
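Editor's note (not part of the webrev): create() now reports the outcome of the separate initialize() step through *ecode instead of unconditionally storing JNI_OK. A hypothetical caller (names assumed, not taken from this patch) would therefore check the error code before using the returned object:

    // Sketch of caller-side handling; the surrounding function is assumed.
    jint ecode = JNI_OK;
    G1ConcurrentRefine* refine = G1ConcurrentRefine::create(&ecode);
    if (ecode != JNI_OK) {
      // Allocation or refinement-thread setup failed; the error was already
      // reported via vm_shutdown_during_initialization(), so just propagate it.
      return ecode;
    }
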