22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/taskTerminator.hpp"
29 #include "gc/shared/taskqueue.hpp"
30 #include "logging/log.hpp"
31
// Construct a terminator coordinating n_threads workers that drain the given
// task queue set. The monitor (_blocker) is allocated eagerly; it is used by
// offer_termination() to sleep non-spin-master waiters and to broadcast when
// all workers have offered termination.
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0),
  _spin_master(NULL) {

  // Never checks for safepoints: termination runs inside GC pauses/worker code.
  _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
}
40
41 TaskTerminator::~TaskTerminator() {
42 assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
43 assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" );
44
45 assert(_spin_master == NULL, "Should have been reset");
46 assert(_blocker != NULL, "Can not be NULL");
47 delete _blocker;
48 }
49
#ifdef ASSERT
// Debug-only helper: reports whether any queue in the set still holds a task.
bool TaskTerminator::peek_in_queue_set() {
  bool found_task = _queue_set->peek();
  return found_task;
}
#endif
55
// Politely give up the rest of this thread's time slice, without any
// safepoint interaction.
void TaskTerminator::yield() {
  // Sanity: no more offers than participating workers.
  assert(_offered_termination <= _n_threads, "Invariant");
  os::naked_yield();
}
60
61 void TaskTerminator::reset_for_reuse() {
62 if (_offered_termination != 0) {
63 assert(_offered_termination == _n_threads,
64 "Terminator may still be in use");
65 _offered_termination = 0;
66 }
67 }
68
// Reset for reuse with a possibly different number of workers.
// Order matters: reset_for_reuse() validates the offer count against the
// OLD _n_threads before the new value is installed.
void TaskTerminator::reset_for_reuse(uint n_threads) {
  reset_for_reuse();
  _n_threads = n_threads;
}
73
74 bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
75 return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
76 }
77
78 size_t TaskTerminator::tasks_in_queue_set() const {
79 return _queue_set->tasks();
80 }
81
// Offer termination on behalf of the calling worker.
// Returns true when all _n_threads workers have offered termination (global
// termination reached), false when this worker should resume scanning because
// tasks reappeared or the terminator callback asked to exit.
// Protocol: at most one waiting thread becomes the "spin master" and polls
// outside the lock via do_spin_master_work(); all other offerers sleep on
// _blocker with a bounded timeout.
bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  assert(_blocker != NULL, "Invariant");

  // Single worker, done
  if (_n_threads == 1) {
    _offered_termination = 1;
    assert(!peek_in_queue_set(), "Precondition");
    return true;
  }

  _blocker->lock_without_safepoint_check();
  _offered_termination++;
  // All arrived, done
  if (_offered_termination == _n_threads) {
    // Wake every sleeping waiter; they each observe the full count and return.
    _blocker->notify_all();
    _blocker->unlock();
    assert(!peek_in_queue_set(), "Precondition");
    return true;
  }

  Thread* the_thread = Thread::current();
  // Loop invariant: _blocker is held at the top of each iteration and at the
  // exit_termination() check at the bottom.
  while (true) {
    if (_spin_master == NULL) {
      // Claim the spin-master role (still under the lock), then spin unlocked.
      _spin_master = the_thread;

      _blocker->unlock();

      if (do_spin_master_work(terminator)) {
        assert(_offered_termination == _n_threads, "termination condition");
        assert(!peek_in_queue_set(), "Precondition");
        return true;
      } else {
        _blocker->lock_without_safepoint_check();
        // There is possibility that termination is reached between dropping the lock
        // before returning from do_spin_master_work() and acquiring lock above.
        if (_offered_termination == _n_threads) {
          _blocker->unlock();
          assert(!peek_in_queue_set(), "Precondition");
          return true;
        }
      }
    } else {
      // Someone else is spin master: sleep until notified or the timeout
      // elapses, then re-check the count (the monitor is held again here,
      // as confirmed by the assert_lock_strong below).
      _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);

      if (_offered_termination == _n_threads) {
        _blocker->unlock();
        assert(!peek_in_queue_set(), "Precondition");
        return true;
      }
    }

    // Not terminated yet. If work showed up (or the callback wants out),
    // retract this worker's offer and return to task processing.
    size_t tasks = tasks_in_queue_set();
    if (exit_termination(tasks, terminator)) {
      assert_lock_strong(_blocker);
      _offered_termination--;
      _blocker->unlock();
      return false;
    }
  }
}
144
145 bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
146 uint yield_count = 0;
147 // Number of hard spin loops done since last yield
148 uint hard_spin_count = 0;
149 // Number of iterations in the hard spin loop.
150 uint hard_spin_limit = WorkStealingHardSpins;
|
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "gc/shared/taskTerminator.hpp"
29 #include "gc/shared/taskqueue.hpp"
30 #include "logging/log.hpp"
31
// Construct a terminator coordinating n_threads workers that drain the given
// task queue set. The monitor (_blocker) is allocated eagerly; it is used by
// offer_termination() to sleep non-spin-master waiters and to broadcast when
// all workers have offered termination.
TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
  _n_threads(n_threads),
  _queue_set(queue_set),
  _offered_termination(0),
  _spin_master(NULL) {

  // Never checks for safepoints: termination runs inside GC pauses/worker code.
  _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
}
40
// Tear down the terminator. Legal only when it was never used since the last
// reset (_offered_termination == 0), or when the previous round finished
// cleanly with every worker having offered termination.
TaskTerminator::~TaskTerminator() {
  if (_offered_termination != 0) {
    // A completed (or aborted) round: all workers offered, queues drained.
    assert(_offered_termination == _n_threads, "Must be terminated or aborted");
    assert_queue_set_empty();
  }

  assert(_spin_master == NULL, "Should have been reset");
  assert(_blocker != NULL, "Can not be NULL");
  delete _blocker;
}
51
#ifdef ASSERT
// Debug-only helper: asserts that every queue in the associated set is empty.
void TaskTerminator::assert_queue_set_empty() const {
  _queue_set->assert_empty();
}
#endif
57
// Politely give up the rest of this thread's time slice, without any
// safepoint interaction.
void TaskTerminator::yield() {
  // Sanity: no more offers than participating workers.
  assert(_offered_termination <= _n_threads, "Invariant");
  os::naked_yield();
}
62
63 void TaskTerminator::reset_for_reuse() {
64 if (_offered_termination != 0) {
65 assert(_offered_termination == _n_threads,
66 "Terminator may still be in use");
67 _offered_termination = 0;
68 }
69 }
70
// Reset for reuse with a possibly different number of workers.
// Order matters: reset_for_reuse() validates the offer count against the
// OLD _n_threads before the new value is installed.
void TaskTerminator::reset_for_reuse(uint n_threads) {
  reset_for_reuse();
  _n_threads = n_threads;
}
75
76 bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
77 return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
78 }
79
80 size_t TaskTerminator::tasks_in_queue_set() const {
81 return _queue_set->tasks();
82 }
83
// Offer termination on behalf of the calling worker.
// Returns true when all _n_threads workers have offered termination (global
// termination reached), false when this worker should resume scanning because
// tasks reappeared or the terminator callback asked to exit.
// Protocol: at most one waiting thread becomes the "spin master" and polls
// outside the lock via do_spin_master_work(); all other offerers sleep on
// _blocker with a bounded timeout.
bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
  assert(_n_threads > 0, "Initialization is incorrect");
  assert(_offered_termination < _n_threads, "Invariant");
  assert(_blocker != NULL, "Invariant");

  // Single worker, done
  if (_n_threads == 1) {
    _offered_termination = 1;
    assert_queue_set_empty();
    return true;
  }

  _blocker->lock_without_safepoint_check();
  _offered_termination++;
  // All arrived, done
  if (_offered_termination == _n_threads) {
    // Wake every sleeping waiter; they each observe the full count and return.
    _blocker->notify_all();
    _blocker->unlock();
    assert_queue_set_empty();
    return true;
  }

  Thread* the_thread = Thread::current();
  // Loop invariant: _blocker is held at the top of each iteration and at the
  // exit_termination() check at the bottom.
  while (true) {
    if (_spin_master == NULL) {
      // Claim the spin-master role (still under the lock), then spin unlocked.
      _spin_master = the_thread;

      _blocker->unlock();

      if (do_spin_master_work(terminator)) {
        assert(_offered_termination == _n_threads, "termination condition");
        assert_queue_set_empty();
        return true;
      } else {
        _blocker->lock_without_safepoint_check();
        // There is possibility that termination is reached between dropping the lock
        // before returning from do_spin_master_work() and acquiring lock above.
        if (_offered_termination == _n_threads) {
          _blocker->unlock();
          assert_queue_set_empty();
          return true;
        }
      }
    } else {
      // Someone else is spin master: sleep until notified or the timeout
      // elapses, then re-check the count (the monitor is held again here,
      // as confirmed by the assert_lock_strong below).
      _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);

      if (_offered_termination == _n_threads) {
        _blocker->unlock();
        assert_queue_set_empty();
        return true;
      }
    }

    // Not terminated yet. If work showed up (or the callback wants out),
    // retract this worker's offer and return to task processing.
    size_t tasks = tasks_in_queue_set();
    if (exit_termination(tasks, terminator)) {
      assert_lock_strong(_blocker);
      _offered_termination--;
      _blocker->unlock();
      return false;
    }
  }
}
146
147 bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
148 uint yield_count = 0;
149 // Number of hard spin loops done since last yield
150 uint hard_spin_count = 0;
151 // Number of iterations in the hard spin loop.
152 uint hard_spin_limit = WorkStealingHardSpins;
|