
src/hotspot/share/gc/shared/taskTerminator.cpp

rev 60302 : [mq]: 8245721-lkorinth-review

*** 41,77 ****
  TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
    _n_threads(n_threads),
    _queue_set(queue_set),
    _offered_termination(0),
!   _spin_master(NULL) {
!
!   _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
! }
  
  TaskTerminator::~TaskTerminator() {
    if (_offered_termination != 0) {
      assert(_offered_termination == _n_threads, "Must be terminated or aborted");
      assert_queue_set_empty();
    }
  
    assert(_spin_master == NULL, "Should have been reset");
-   assert(_blocker != NULL, "Can not be NULL");
-   delete _blocker;
  }
  
  #ifdef ASSERT
  void TaskTerminator::assert_queue_set_empty() const {
    _queue_set->assert_empty();
  }
  #endif
  
- void TaskTerminator::yield() {
-   assert(_offered_termination <= _n_threads, "Invariant");
-   os::naked_yield();
- }
- 
  void TaskTerminator::reset_for_reuse() {
    if (_offered_termination != 0) {
      assert(_offered_termination == _n_threads,
             "Only %u of %u threads offered termination", _offered_termination, _n_threads);
      assert(_spin_master == NULL, "Leftover spin master " PTR_FORMAT, p2i(_spin_master));
--- 41,68 ----
  TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
    _n_threads(n_threads),
    _queue_set(queue_set),
    _offered_termination(0),
!   _blocker(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never),
!   _spin_master(NULL) { }
  
  TaskTerminator::~TaskTerminator() {
    if (_offered_termination != 0) {
      assert(_offered_termination == _n_threads, "Must be terminated or aborted");
      assert_queue_set_empty();
    }
  
    assert(_spin_master == NULL, "Should have been reset");
  }
  
  #ifdef ASSERT
  void TaskTerminator::assert_queue_set_empty() const {
    _queue_set->assert_empty();
  }
  #endif
  
  void TaskTerminator::reset_for_reuse() {
    if (_offered_termination != 0) {
      assert(_offered_termination == _n_threads,
             "Only %u of %u threads offered termination", _offered_termination, _n_threads);
      assert(_spin_master == NULL, "Leftover spin master " PTR_FORMAT, p2i(_spin_master));
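Reviewer note: the hunk above replaces the heap-allocated Monitor* _blocker with a Monitor embedded by value and constructed in the initializer list, which removes the new/delete pair and the NULL assert, and it also drops the unused yield() helper. The following stand-alone sketch (not HotSpot code; std::mutex is used as a stand-in for Monitor, and the Terminator name is illustration-only) shows the same by-value-member pattern:

    #include <mutex>

    class Terminator {
      unsigned   _n_threads;
      std::mutex _blocker;   // embedded by value; constructed and destroyed with the object

    public:
      explicit Terminator(unsigned n_threads)
        : _n_threads(n_threads),
          _blocker() { }     // member initializer replaces the heap allocation

      ~Terminator() = default; // no delete, no NULL-check: the member cannot dangle
    };

    int main() {
      Terminator t(4);       // _blocker lives and dies with t
      return 0;
    }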
*** 91,119 ****
  size_t TaskTerminator::tasks_in_queue_set() const {
    return _queue_set->tasks();
  }
  
  void TaskTerminator::prepare_for_return(Thread* this_thread, size_t tasks) {
!   assert(_blocker->is_locked(), "must be");
!   assert(_blocker->owned_by_self(), "must be");
    assert(_offered_termination >= 1, "must be");
  
    if (_spin_master == this_thread) {
      _spin_master = NULL;
    }
  
    if (tasks >= _offered_termination - 1) {
!     _blocker->notify_all();
    } else {
      for (; tasks > 1; tasks--) {
!       _blocker->notify();
      }
    }
  }
  
  bool TaskTerminator::do_delay_step(DelayContext& delay_context) {
!   assert(!_blocker->owned_by_self(), "should not be owned by self");
  
    if (delay_context._yield_count < WorkStealingYieldsBeforeSleep) {
      delay_context._yield_count++;
      // Periodically call yield() instead spinning
      // After WorkStealingSpinToYieldRatio spins, do a yield() call
--- 82,110 ----
  size_t TaskTerminator::tasks_in_queue_set() const {
    return _queue_set->tasks();
  }
  
  void TaskTerminator::prepare_for_return(Thread* this_thread, size_t tasks) {
!   assert(_blocker.is_locked(), "must be");
!   assert(_blocker.owned_by_self(), "must be");
    assert(_offered_termination >= 1, "must be");
  
    if (_spin_master == this_thread) {
      _spin_master = NULL;
    }
  
    if (tasks >= _offered_termination - 1) {
!     _blocker.notify_all();
    } else {
      for (; tasks > 1; tasks--) {
!       _blocker.notify();
      }
    }
  }
  
  bool TaskTerminator::do_delay_step(DelayContext& delay_context) {
!   assert(!_blocker.owned_by_self(), "should not be owned by self");
  
    if (delay_context._yield_count < WorkStealingYieldsBeforeSleep) {
      delay_context._yield_count++;
      // Periodically call yield() instead spinning
      // After WorkStealingSpinToYieldRatio spins, do a yield() call
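Reviewer note: only the member access changes here (-> becomes .), but the wake-up policy in prepare_for_return() is worth spelling out: wake every waiter when there are at least as many tasks as sleeping peers, otherwise wake roughly one waiter per spare task. A minimal stand-in sketch of that policy (not HotSpot code; std::condition_variable replaces Monitor, and WakePolicy is an illustration-only name):

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    struct WakePolicy {
      std::mutex              blocker;
      std::condition_variable cv;
      std::size_t             offered_termination = 0; // peers currently offering termination

      // Caller must hold `blocker`, mirroring the is_locked()/owned_by_self() asserts.
      void prepare_for_return(std::size_t tasks) {
        if (tasks >= offered_termination - 1) {
          cv.notify_all();                 // plenty of work: wake every waiter
        } else {
          for (; tasks > 1; tasks--) {
            cv.notify_one();               // little work: wake one waiter per task
          }
        }
      }
    };

    int main() {
      WakePolicy w;
      std::lock_guard<std::mutex> guard(w.blocker);
      w.offered_termination = 3;
      w.prepare_for_return(1);             // too few tasks: leave the other waiters asleep
      return 0;
    }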
*** 139,160 ****
  }
  
  bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
    assert(_n_threads > 0, "Initialization is incorrect");
    assert(_offered_termination < _n_threads, "Invariant");
-   assert(_blocker != NULL, "Invariant");
  
    // Single worker, done
    if (_n_threads == 1) {
      _offered_termination = 1;
      assert_queue_set_empty();
      return true;
    }
  
    Thread* the_thread = Thread::current();
  
!   MonitorLocker x(_blocker, Mutex::_no_safepoint_check_flag);
    _offered_termination++;
  
    if (_offered_termination == _n_threads) {
      prepare_for_return(the_thread);
      assert_queue_set_empty();
--- 130,150 ----
  }
  
  bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
    assert(_n_threads > 0, "Initialization is incorrect");
    assert(_offered_termination < _n_threads, "Invariant");
  
    // Single worker, done
    if (_n_threads == 1) {
      _offered_termination = 1;
      assert_queue_set_empty();
      return true;
    }
  
    Thread* the_thread = Thread::current();
  
!   MonitorLocker x(&_blocker, Mutex::_no_safepoint_check_flag);
    _offered_termination++;
  
    if (_offered_termination == _n_threads) {
      prepare_for_return(the_thread);
      assert_queue_set_empty();
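Reviewer note: with the embedded member, MonitorLocker now takes &_blocker, and the NULL assert becomes redundant. For reference, the overall shape of offer_termination() (single-worker fast path, then counting offers under the lock and waking everyone when the last worker arrives) can be sketched in plain C++ as follows; this is not HotSpot code, std::unique_lock stands in for MonitorLocker, and SimpleTerminator is an illustration-only name:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class SimpleTerminator {
      unsigned                _n_threads;
      unsigned                _offered_termination = 0;
      std::mutex              _blocker;              // embedded, like the patched field
      std::condition_variable _cv;

    public:
      explicit SimpleTerminator(unsigned n_threads) : _n_threads(n_threads) { }

      bool offer_termination() {
        if (_n_threads == 1) {                       // single worker, done
          _offered_termination = 1;
          return true;
        }
        std::unique_lock<std::mutex> x(_blocker);    // locker built from the member itself
        _offered_termination++;
        if (_offered_termination == _n_threads) {    // last thread in: wake the others
          _cv.notify_all();
          return true;
        }
        _cv.wait(x, [&] { return _offered_termination == _n_threads; });
        return true;
      }
    };

    int main() {
      SimpleTerminator t(2);
      std::thread a([&] { t.offer_termination(); });
      t.offer_termination();
      a.join();
      return 0;
    }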
*** 171,181 ****
      // deciding when to sleep.
      ++delay_context._yield_count;
      size_t tasks;
      bool should_exit_termination;
      {
!       MutexUnlocker y(_blocker, Mutex::_no_safepoint_check_flag);
        do_delay_step(delay_context);
        // Intentionally read the number of tasks outside the mutex since this
        // is potentially a long operation making the locked section long.
        tasks = tasks_in_queue_set();
        should_exit_termination = exit_termination(tasks, terminator);
--- 161,171 ----
      // deciding when to sleep.
      ++delay_context._yield_count;
      size_t tasks;
      bool should_exit_termination;
      {
!       MutexUnlocker y(&_blocker, Mutex::_no_safepoint_check_flag);
        do_delay_step(delay_context);
        // Intentionally read the number of tasks outside the mutex since this
        // is potentially a long operation making the locked section long.
        tasks = tasks_in_queue_set();
        should_exit_termination = exit_termination(tasks, terminator);
--- EOF ---
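Reviewer note: as with the other hunks, only the address-of changes here. The MutexUnlocker idiom itself (temporarily dropping a lock you already hold around the delay step and the potentially slow task-count read, then reacquiring it) can be illustrated with the following stand-alone sketch; it is not HotSpot code, and ScopedUnlock and tasks_in_queue_set are illustration-only stand-ins:

    #include <cstddef>
    #include <mutex>

    class ScopedUnlock {
      std::unique_lock<std::mutex>& _l;
    public:
      explicit ScopedUnlock(std::unique_lock<std::mutex>& l) : _l(l) { _l.unlock(); } // release on entry
      ~ScopedUnlock()                                                 { _l.lock();   } // reacquire on exit
    };

    static std::size_t tasks_in_queue_set() { return 0; } // placeholder for the slow read

    int main() {
      std::mutex blocker;
      std::unique_lock<std::mutex> x(blocker);   // normally held across the termination logic

      std::size_t tasks;
      {
        ScopedUnlock y(x);                       // drop the lock for the long operation
        tasks = tasks_in_queue_set();            // read the task count outside the mutex
      }                                          // lock is reacquired here
      (void)tasks;
      return 0;
    }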