inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
  // A StarTask holds either a compressed (narrowOop*) or a full-width (oop*)
  // reference; dispatch to the matching deal_with_reference overload.
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
  StarTask stolen_task;
  while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
    assert(verify_task(stolen_task), "sanity");
    dispatch_reference(stolen_task);

    // We've just processed a reference and we might have made
    // available new entries on the queues. So we have to make sure
    // we drain the queues as necessary.
    trim_queue();
  }
}

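// For illustration (not part of this file): a typical caller first drains its
// own queue, then alternates stealing with termination attempts until all
// workers agree there is no work left. A minimal sketch, assuming an
// evacuation closure with access to the queue set and a termination protocol:
//
//   pss->trim_queue();                       // drain our own queue first
//   do {
//     pss->steal_and_trim_queue(queues());   // then steal from other workers
//   } while (!offer_termination());
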
inline bool G1ParScanThreadState::needs_partial_trimming() const {
  return !_refs->overflow_empty() || _refs->size() > _stack_drain_upper_threshold;
}

inline bool G1ParScanThreadState::is_partially_trimmed() const {
  return _refs->overflow_empty() && _refs->size() <= _stack_drain_lower_threshold;
}

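// Note on the two predicates above: the thresholds form a hysteresis band
// rather than a single cutoff. With hypothetical values of
// _stack_drain_upper_threshold = 32 and _stack_drain_lower_threshold = 16,
// a worker starts partial trimming only once its queue exceeds 32 entries
// (or has spilled to the overflow stack) and then drains it all the way down
// to 16, instead of trimming again as soon as the very next push crosses the
// boundary.
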
inline void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  StarTask ref;
  // Drain the overflow stack first, so other threads can potentially steal.
  // Entries are moved back onto the bounded taskqueue where they can be
  // stolen; only if it is full do we process the reference directly.
  while (_refs->pop_overflow(ref)) {
    if (!_refs->try_push_to_taskqueue(ref)) {
      dispatch_reference(ref);
    }
  }

  // pop_local() only succeeds while more than threshold entries remain,
  // leaving the rest available for other workers to steal.
  while (_refs->pop_local(ref, threshold)) {
    dispatch_reference(ref);
  }
}

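// For illustration (not part of this file): a full drain can be expressed in
// terms of the same primitive by trimming to a threshold of zero until both
// the queue and the overflow stack are empty. A sketch, assuming trim_queue()
// is implemented along these lines elsewhere:
//
//   do {
//     trim_queue_to_threshold(0);
//   } while (!_refs->is_empty());
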
inline void G1ParScanThreadState::trim_queue_partially() {
  if (!needs_partial_trimming()) {
    return;
  }

  const Ticks start = Ticks::now();
  // Dispatching a reference may push new entries, possibly spilling to the
  // overflow stack again, so re-check after every pass.
  do {
    trim_queue_to_threshold(_stack_drain_lower_threshold);
  } while (!is_partially_trimmed());
  _trim_ticks += Ticks::now() - start;
}

inline Tickspan G1ParScanThreadState::trim_ticks() {
  return _trim_ticks;
}

inline void G1ParScanThreadState::reset_trim_ticks() {
  _trim_ticks = Tickspan();
}

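// For illustration (not part of this file): the trim time accumulated above is
// meant to be sampled and then cleared by whoever reports per-worker phase
// times. A minimal sketch, with report_trim_time() as a hypothetical consumer:
//
//   Tickspan trim_time = pss->trim_ticks();
//   pss->reset_trim_ticks();
//   report_trim_time(worker_id, trim_time);
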
#endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP