
void G1ParScanThreadState::trim_queue() {
  StarTask ref;
  do {
    // Drain the overflow stack first, so other threads can steal.
    while (_refs->pop_overflow(ref)) {
      if (!_refs->try_push_to_taskqueue(ref)) {
        dispatch_reference(ref);
      }
    }

    while (_refs->pop_local(ref)) {
      dispatch_reference(ref);
    }
  } while (!_refs->is_empty());
}
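
// A minimal standalone sketch (not part of this file) of the drain order used
// by trim_queue() above: overflow entries are moved back onto the bounded,
// stealable task queue when there is room (so idle workers can steal them) and
// processed directly otherwise, then the local queue is drained; this repeats
// until both are empty. SimpleTaskQueue, Task and dispatch() are hypothetical
// stand-ins for RefToScanQueue, StarTask and dispatch_reference().
#include <cstddef>
#include <deque>
#include <vector>

struct Task { void* p; };

class SimpleTaskQueue {
  static constexpr size_t _capacity = 4;  // tiny on purpose, to force overflow
  std::deque<Task>  _queue;               // bounded part; stealable in a real setup
  std::vector<Task> _overflow;            // private overflow stack, not stealable
public:
  void push(Task t) {
    if (!try_push_to_taskqueue(t)) { _overflow.push_back(t); }
  }
  bool try_push_to_taskqueue(Task t) {
    if (_queue.size() >= _capacity) { return false; }
    _queue.push_back(t);
    return true;
  }
  bool pop_overflow(Task& t) {
    if (_overflow.empty()) { return false; }
    t = _overflow.back(); _overflow.pop_back();
    return true;
  }
  bool pop_local(Task& t) {
    if (_queue.empty()) { return false; }
    t = _queue.back(); _queue.pop_back();
    return true;
  }
  bool is_empty() const { return _queue.empty() && _overflow.empty(); }
};

static void dispatch(Task) { /* process the reference; may push more work */ }

static void trim(SimpleTaskQueue& refs) {
  Task t;
  do {
    // Prefer putting overflow work back where other workers could steal it.
    while (refs.pop_overflow(t)) {
      if (!refs.try_push_to_taskqueue(t)) {
        dispatch(t);
      }
    }
    while (refs.pop_local(t)) {
      dispatch(t);
    }
  } while (!refs.is_empty());
}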

HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                      InCSetState* dest,
                                                      size_t word_sz,
                                                      AllocationContext_t const context,
                                                      bool previous_plab_refill_failed) {
  assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                        word_sz,
                                                        context,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies for old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
    // no other space to try.
    return NULL;
  }
}
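
// A minimal standalone sketch of the fallback policy in allocate_in_next_plab()
// above, using simplified, hypothetical types (Dest, FallbackState and
// FakePlabAllocator stand in for InCSetState, the thread-state fields and
// G1PLABAllocator). A survivor-destined allocation retries in old gen; a failed
// survivor PLAB refill zeroes the tenuring threshold so nothing else is sent to
// survivor space, and a failed old PLAB refill marks old gen as full.
#include <cstddef>

enum class Dest { Young, Old };

struct FallbackState {
  unsigned tenuring_threshold = 15;
  bool     old_gen_is_full    = false;
};

struct FakePlabAllocator {
  // Stub for an old-gen PLAB allocation; *refill_failed is set when no new
  // PLAB could be obtained (simulated here as always failing).
  void* allocate_old(size_t /* word_sz */, bool* refill_failed) {
    *refill_failed = true;
    return nullptr;
  }
};

void* allocate_in_next_space(Dest* dest, size_t word_sz,
                             bool previous_plab_refill_failed,
                             FakePlabAllocator& plab, FallbackState& st) {
  if (*dest == Dest::Young) {
    bool old_refill_failed = false;
    void* p = plab.allocate_old(word_sz, &old_refill_failed);
    // Survivor space could not even provide a fresh PLAB, so stop routing
    // further objects there, whatever happens with this one.
    if (previous_plab_refill_failed) {
      st.tenuring_threshold = 0;
    }
    if (p != nullptr) {
      *dest = Dest::Old;               // the copy will be placed in old gen
    } else {
      st.old_gen_is_full = old_refill_failed;
    }
    return p;
  }
  // Already targeting old gen: there is no further space to fall back to.
  st.old_gen_is_full = previous_plab_refill_failed;
  return nullptr;
}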

InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
  if (state.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return state;
    }
  }
  return dest(state);
}
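
// A minimal standalone sketch of the tenuring decision in next_state() above,
// with a hypothetical FakeMark in place of markOop: when the header has been
// displaced (locked/inflated object), the age has to be read from the
// displaced header; young objects below the threshold stay young, everything
// else is tenured.
#include <cstdint>

struct FakeMark {
  uint32_t  age_bits;
  FakeMark* displaced;   // non-null when the real header lives elsewhere
  bool has_displaced() const { return displaced != nullptr; }
  uint32_t age() const { return age_bits; }
};

enum class Gen { Young, Old };

Gen next_gen(Gen current, const FakeMark* m, unsigned tenuring_threshold, unsigned& age) {
  if (current == Gen::Young) {
    age = m->has_displaced() ? m->displaced->age() : m->age();
    if (age < tenuring_threshold) {
      return Gen::Young;   // keep the object in survivor space
    }
  }
  return Gen::Old;         // old enough, or already outside young: tenure
}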

void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr,
                                                  const AllocationContext_t context) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
                                                             dest_state.value() == InCSetState::Old,
                                                             alloc_buf->word_sz());
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
                                                              dest_state.value() == InCSetState::Old);
  }
}

oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  // +1 to make the -1 indexes valid...
  const int young_index = from_region->young_index_in_cset()+1;
  assert( (from_region->is_young() && young_index > 0) ||
         (!from_region->is_young() && young_index == 0), "invariant" );
  const AllocationContext_t context = from_region->allocation_context();

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_state.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context, plab_refill_failed);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit
      report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied)
        obj->set_mark(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark(old_mark->set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    _surviving_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      push_on_queue(old_p);
    } else {
      HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
      _scanner.set_region(to_region);
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return forward_ptr;
  }
}
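
// A minimal standalone sketch of the claim/undo protocol around
// forward_to_atomic() in copy_to_survivor_space() above, with a plain
// std::atomic standing in for the mark-word CAS and hypothetical FakeObject /
// undo_allocation names. Exactly one worker wins the race to install the
// forwarding pointer and performs the copy; a loser rolls back its PLAB
// allocation and uses the winner's copy instead.
#include <atomic>

struct FakeObject {
  std::atomic<FakeObject*> forwardee{nullptr};
};

// Returns null when this thread installed the forwarding pointer (it won the
// race), otherwise the copy some other thread installed first.
FakeObject* forward_to_atomic_sketch(FakeObject* old_obj, FakeObject* my_copy) {
  FakeObject* expected = nullptr;
  if (old_obj->forwardee.compare_exchange_strong(expected, my_copy)) {
    return nullptr;
  }
  return expected;
}

FakeObject* evacuate_sketch(FakeObject* old_obj, FakeObject* my_copy,
                            void (*undo_allocation)(FakeObject*)) {
  FakeObject* winner = forward_to_atomic_sketch(old_obj, my_copy);
  if (winner == nullptr) {
    // We won: copy the payload, fix up the mark word, push follow-up work.
    return my_copy;
  }
  undo_allocation(my_copy);   // give the unused PLAB space back
  return winner;              // everyone agrees on the winner's copy
}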
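
// A minimal standalone sketch of the partial object-array chunking used above:
// the to-space copy's length field temporarily records how far scanning has
// progressed, the true length remains readable in the from-space copy, and one
// chunk is processed per dequeued task, re-queueing the remainder. FakeArray,
// kChunk and the simple capping heuristic are hypothetical simplifications of
// the real arrayOop / ParGCArrayScanChunk handling.
#include <algorithm>
#include <vector>

struct FakeArray {
  int length;                   // on the queued to-space copy: next index to scan
  std::vector<void*> elements;
};

const int kChunk = 64;          // stands in for ParGCArrayScanChunk

// Invoked when a partial-array task (identifying the from-space object) is
// popped; the to-space copy is the one whose length field tracks progress.
void process_array_chunk(FakeArray* from_array, FakeArray* to_array,
                         std::vector<FakeArray*>& queue,
                         void (*scan_element)(void*)) {
  const int real_length = from_array->length;
  const int start       = to_array->length;
  const int end         = std::min(start + kChunk, real_length);

  if (end < real_length) {
    to_array->length = end;         // record progress...
    queue.push_back(from_array);    // ...and re-queue the rest as new work
  } else {
    to_array->length = real_length; // last chunk: restore the true length
  }
  for (int i = start; i < end; i++) {
    scan_element(to_array->elements[i]);
  }
}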

G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] = new_par_scan_state(worker_id, _young_cset_length);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");
