138
139 void G1ParScanThreadState::verify_task(PartialArrayScanTask task) const {
140 // Must be in the collection set--it's already been copied.
141 oop p = task.to_source_array();
142 assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
143 }
144
145 void G1ParScanThreadState::verify_task(ScannerTask task) const {
146 if (task.is_narrow_oop_ptr()) {
147 verify_task(task.to_narrow_oop_ptr());
148 } else if (task.is_oop_ptr()) {
149 verify_task(task.to_oop_ptr());
150 } else if (task.is_partial_array_task()) {
151 verify_task(task.to_partial_array_task());
152 } else {
153 ShouldNotReachHere();
154 }
155 }
156 #endif // ASSERT
157
158 void G1ParScanThreadState::trim_queue() {
159 do {
160 // Fully drain the queue.
161 trim_queue_to_threshold(0);
162 } while (!_task_queue->is_empty());
163 }
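
// A minimal sketch (not HotSpot code) of what "trim to a threshold" means for a
// worker-local task queue: keep processing entries until at most `threshold`
// remain, so trim_queue() above is simply "trim to a threshold of zero". The
// std::vector queue and the process callback are assumptions for illustration.
#include <cstddef>
#include <vector>

template <typename Task, typename ProcessFn>
void trim_to_threshold(std::vector<Task>& local_queue, std::size_t threshold, ProcessFn process) {
  while (local_queue.size() > threshold) {
    Task t = local_queue.back();   // LIFO order, like pop_local()
    local_queue.pop_back();
    process(t);                    // processing a task may push new tasks onto local_queue
  }
}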
164
165 HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
166 size_t word_sz,
167 bool previous_plab_refill_failed,
168 uint node_index) {
169
170 assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());
171
172 // Right now we only have two types of regions (young / old) so
173 // let's keep the logic here simple. We can generalize it when necessary.
174 if (dest->is_young()) {
175 bool plab_refill_in_old_failed = false;
176 HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
177 word_sz,
178 &plab_refill_in_old_failed,
179 node_index);
180 // Make sure that we won't attempt to copy any other objects out
181 // of a survivor region (given that apparently we cannot allocate
182 // any new ones) to avoid coming into this slow path again and again.
210 return region_attr;
211 }
212 }
213 return dest(region_attr);
214 }
215
216 void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
217 oop const old, size_t word_sz, uint age,
218 HeapWord * const obj_ptr, uint node_index) const {
219 PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
220 if (alloc_buf->contains(obj_ptr)) {
221 _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
222 dest_attr.type() == G1HeapRegionAttr::Old,
223 alloc_buf->word_sz() * HeapWordSize);
224 } else {
225 _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
226 dest_attr.type() == G1HeapRegionAttr::Old);
227 }
228 }
229
230 oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
231 oop const old,
232 markWord const old_mark) {
233 const size_t word_sz = old->size();
234
235 uint age = 0;
236 G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
237 // The second clause is to prevent premature evacuation failure in case there
238 // is still space in survivor, but old gen is full.
239 if (_old_gen_is_full && dest_attr.is_old()) {
240 return handle_evacuation_failure_par(old, old_mark);
241 }
242 HeapRegion* const from_region = _g1h->heap_region_containing(old);
243 uint node_index = from_region->node_index();
244
245 HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
246
247 // PLAB allocations should succeed most of the time, so we'll
248 // normally check against NULL once and that's it.
249 if (obj_ptr == NULL) {
250 bool plab_refill_failed = false;
251 obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed, node_index);
252 if (obj_ptr == NULL) {
253 assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str());
254 obj_ptr = allocate_in_next_plab(&dest_attr, word_sz, plab_refill_failed, node_index);
255 if (obj_ptr == NULL) {
256 // This will either forward-to-self, or detect that someone else has
257 // installed a forwarding pointer.
258 return handle_evacuation_failure_par(old, old_mark);
259 }
260 }
261 update_numa_stats(node_index);
262
263 if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
264 // The events are checked individually as part of the actual commit
265 report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);
266 }
267 }
268
269 assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
270 assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
271
272 #ifndef PRODUCT
273 // Should this evacuation fail?
274 if (_g1h->evacuation_should_fail()) {
275 // Doing this after all the allocation attempts also exercises the
276 // undo_allocation() method.
277 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
278 return handle_evacuation_failure_par(old, old_mark);
279 }
280 #endif // !PRODUCT
281
282 // We're going to allocate linearly, so might as well prefetch ahead.
283 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
284
285 const oop obj = oop(obj_ptr);
286 const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
287 if (forward_ptr == NULL) {
288 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
289
290 const uint young_index = from_region->young_index_in_cset();
291
292 assert((from_region->is_young() && young_index > 0) ||
293 (!from_region->is_young() && young_index == 0), "invariant" );
294
295 if (dest_attr.is_young()) {
296 if (age < markWord::max_age) {
297 age++;
298 }
299 if (old_mark.has_displaced_mark_helper()) {
300 // In this case, we have to install the mark word first,
301 // otherwise obj looks to be forwarded (the old mark word,
302 // which contains the forward pointer, was copied)
303 obj->set_mark_raw(old_mark);
304 markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
305 old_mark.set_displaced_mark_helper(new_mark);
306 } else {
307 obj->set_mark_raw(old_mark.set_age(age));
308 }
309 _age_table.add(age, word_sz);
310 } else {
311 obj->set_mark_raw(old_mark);
312 }
313
314 if (G1StringDedup::is_enabled()) {
315 const bool is_from_young = region_attr.is_young();
316 const bool is_to_young = dest_attr.is_young();
317 assert(is_from_young == from_region->is_young(),
318 "sanity");
319 assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
320 "sanity");
321 G1StringDedup::enqueue_from_evacuation(is_from_young,
322 is_to_young,
323 _worker_id,
324 obj);
325 }
326
327 _surviving_young_words[young_index] += word_sz;
328
329 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
330 // We keep track of the next start index in the length field of
331 // the to-space object. The actual length can be found in the
332 // length field of the from-space object.
333 arrayOop(obj)->set_length(0);
334 do_partial_array(PartialArrayScanTask(old));
335 } else {
336 G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
337 obj->oop_iterate_backwards(&_scanner);
338 }
339 return obj;
340 } else {
341 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
342 return forward_ptr;
343 }
344 }
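
// A minimal sketch (not HotSpot code) of the age rule applied above when copying to
// a survivor region, assuming a saturating age counter like markWord's: survivor
// copies age by one, capped at max_age, while copies tenured to old keep their age.
// Usage: next_age(3, true) == 4, next_age(max_age, true) == max_age.
namespace age_sketch {
  const unsigned max_age = 15;   // assumption mirroring markWord::max_age

  inline unsigned next_age(unsigned age, bool dest_is_young) {
    if (!dest_is_young) {
      return age;                           // tenured copies keep their current age
    }
    return age < max_age ? age + 1 : age;   // survivor copies age by one, saturating
  }
}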
345
346 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
347 assert(worker_id < _n_workers, "out of bounds access");
348 if (_states[worker_id] == NULL) {
349 _states[worker_id] =
350 new G1ParScanThreadState(_g1h, _rdcqs, worker_id, _young_cset_length, _optional_cset_length);
351 }
352 return _states[worker_id];
353 }
354
355 const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
356 assert(_flushed, "thread local state from the per thread states should have been flushed");
357 return _surviving_young_words_total;
358 }
359
360 void G1ParScanThreadStateSet::flush() {
361 assert(!_flushed, "thread local state from the per thread states should be flushed once");
362
363 for (uint worker_id = 0; worker_id < _n_workers; ++worker_id) {
364 G1ParScanThreadState* pss = _states[worker_id];
365
381
382 delete pss;
383 _states[worker_id] = NULL;
384 }
385 _flushed = true;
386 }
387
388 void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
389 for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
390 G1ParScanThreadState* pss = _states[worker_index];
391
392 if (pss == NULL) {
393 continue;
394 }
395
396 size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
397 _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
398 }
399 }
400
401 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
402 assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
403
404 oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
405 if (forward_ptr == NULL) {
406 // Forward-to-self succeeded. We are the "owner" of the object.
407 HeapRegion* r = _g1h->heap_region_containing(old);
408
409 if (!r->evacuation_failed()) {
410 r->set_evacuation_failed(true);
411 _g1h->hr_printer()->evac_failure(r);
412 }
413
414 _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
415
416 G1ScanInYoungSetter x(&_scanner, r->is_young());
417 old->oop_iterate_backwards(&_scanner);
418
419 return old;
420 } else {
421 // Forward-to-self failed. Either someone else managed to allocate
422 // space for this object (old != forward_ptr) or they beat us in
423 // self-forwarding it (old == forward_ptr).
424 assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
425 "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
426 "should not be in the CSet",
427 p2i(old), p2i(forward_ptr));
428 return forward_ptr;
429 }
430 }
431 G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
432 G1RedirtyCardsQueueSet* rdcqs,
433 uint n_workers,
434 size_t young_cset_length,
435 size_t optional_cset_length) :
436 _g1h(g1h),
437 _rdcqs(rdcqs),
438 _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
439 _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC)),
440 _young_cset_length(young_cset_length),
441 _optional_cset_length(optional_cset_length),
442 _n_workers(n_workers),
443 _flushed(false) {
444 for (uint i = 0; i < n_workers; ++i) {
445 _states[i] = NULL;
446 }
447 memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t));
448 }
449
450 G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
138
139 void G1ParScanThreadState::verify_task(PartialArrayScanTask task) const {
140 // Must be in the collection set--it's already been copied.
141 oop p = task.to_source_array();
142 assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
143 }
144
145 void G1ParScanThreadState::verify_task(ScannerTask task) const {
146 if (task.is_narrow_oop_ptr()) {
147 verify_task(task.to_narrow_oop_ptr());
148 } else if (task.is_oop_ptr()) {
149 verify_task(task.to_oop_ptr());
150 } else if (task.is_partial_array_task()) {
151 verify_task(task.to_partial_array_task());
152 } else {
153 ShouldNotReachHere();
154 }
155 }
156 #endif // ASSERT
157
158 template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
159 // The reference should not be NULL here, as NULL references are never pushed to the task queue.
160 oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
161
162 // Although we never intentionally push references outside of the collection
163 // set, (benign) races in the claim mechanism during RSet scanning mean that
164 // more than one thread might claim the same card. The same card may therefore
165 // be processed multiple times, and we might get references into old gen here,
166 // so we need to redo this check.
167 const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
168 // References pushed onto the work stack should never point to a humongous region
169 // as they are not added to the collection set due to above precondition.
170 assert(!region_attr.is_humongous(),
171 "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
172 p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));
173
174 if (!region_attr.is_in_cset()) {
175 // In this case somebody else already did all the work.
176 return;
177 }
178
179 markWord m = obj->mark_raw();
180 if (m.is_marked()) {
181 obj = (oop) m.decode_pointer();
182 } else {
183 obj = do_copy_to_survivor_space(region_attr, obj, m);
184 }
185 RawAccess<IS_NOT_NULL>::oop_store(p, obj);
186
187 assert(obj != NULL, "Must be");
188 if (HeapRegion::is_in_same_region(p, obj)) {
189 return;
190 }
191 HeapRegion* from = _g1h->heap_region_containing(p);
192 if (!from->is_young()) {
193 enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
194 }
195 }
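
// A minimal sketch (not HotSpot code) of the "already copied?" test in do_oop_evac()
// above: during evacuation the from-space object's header either still holds a normal
// mark word or has been overwritten with a forwarding pointer to the to-space copy.
// The 0b11 tag below is an assumption used for illustration, not a claim about the
// exact markWord bit layout.
#include <cstdint>

struct SketchObj;                       // stand-in for an object

struct SketchMark {
  std::uintptr_t bits;
  bool is_forwarded() const { return (bits & 0x3) == 0x3; }
  SketchObj* forwardee() const {
    return reinterpret_cast<SketchObj*>(bits & ~static_cast<std::uintptr_t>(0x3));
  }
};

// Either reuse the existing copy or make one via the supplied copy routine.
template <typename CopyFn>
SketchObj* resolve(SketchMark m, SketchObj* original, CopyFn copy_to_survivor) {
  return m.is_forwarded() ? m.forwardee() : copy_to_survivor(original, m);
}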
196
197 void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
198 oop from_obj = task.to_source_array();
199
200 assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
201 assert(from_obj->is_objArray(), "must be obj array");
202 objArrayOop from_obj_array = objArrayOop(from_obj);
203 // The from-space object contains the real length.
204 int length = from_obj_array->length();
205
206 assert(from_obj->is_forwarded(), "must be forwarded");
207 oop to_obj = from_obj->forwardee();
208 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
209 objArrayOop to_obj_array = objArrayOop(to_obj);
210 // We keep track of the next start index in the length field of the
211 // to-space object.
212 int next_index = to_obj_array->length();
213 assert(0 <= next_index && next_index < length,
214 "invariant, next index: %d, length: %d", next_index, length);
215
216 int start = next_index;
217 int end = length;
218 int remainder = end - start;
219 // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
220 if (remainder > 2 * ParGCArrayScanChunk) {
221 end = start + ParGCArrayScanChunk;
222 to_obj_array->set_length(end);
223 // Push the remainder before we process the range in case another
224 // worker has run out of things to do and can steal it.
225 push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
226 } else {
227 assert(length == end, "sanity");
228 // We'll process the final range for this object. Restore the length
229 // so that the heap remains parsable in case of evacuation failure.
230 to_obj_array->set_length(end);
231 }
232
233 HeapRegion* hr = _g1h->heap_region_containing(to_obj);
234 G1ScanInYoungSetter x(&_scanner, hr->is_young());
235 // Process indexes [start,end). It will also process the header
236 // along with the first chunk (i.e., the chunk with start == 0).
237 // Note that at this point the length field of to_obj_array is not
238 // correct given that we are using it to keep track of the next
239 // start index. oop_iterate_range() (thankfully!) ignores the length
240 // field and only relies on the start / end parameters. It does
241 // however return the size of the object which will be incorrect. So
242 // we have to ignore it even if we wanted to use it.
243 to_obj_array->oop_iterate_range(&_scanner, start, end);
244 }
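
// A self-contained sketch (not HotSpot code) of the chunking policy above: the
// to-space array's length field is borrowed to hold the next start index, and a
// worker only carves off another fixed-size chunk while the remainder is larger
// than two chunks, so the final range processed is at most two chunks long.
// The chunk size of 50 below is an assumption for illustration.
#include <cassert>

namespace chunk_sketch {
  const int chunk = 50;                      // stands in for ParGCArrayScanChunk

  struct Range { int end; bool push_remainder; };

  // Given the next start index (stored in the to-space length field) and the real
  // length (taken from the from-space object), decide how far to scan now and
  // whether the remainder must be re-pushed for later processing.
  Range next_range(int next_index, int real_length) {
    int remainder = real_length - next_index;
    if (remainder > 2 * chunk) {
      return { next_index + chunk, true };   // scan one chunk, re-push the rest
    }
    return { real_length, false };           // final range; restore the real length
  }
}

int main() {
  using namespace chunk_sketch;
  assert(next_range(0, 500).end == 50);            // first chunk of a large array
  assert(next_range(350, 500).push_remainder);     // 150 left: take 50, re-push 100
  assert(!next_range(450, 500).push_remainder);    // last 50 handled in one go
  return 0;
}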
245
246 void G1ParScanThreadState::dispatch_task(ScannerTask task) {
247 verify_task(task);
248 if (task.is_narrow_oop_ptr()) {
249 do_oop_evac(task.to_narrow_oop_ptr());
250 } else if (task.is_oop_ptr()) {
251 do_oop_evac(task.to_oop_ptr());
252 } else {
253 do_partial_array(task.to_partial_array_task());
254 }
255 }
256
257 // Process tasks until overflow queue is empty and local queue
258 // contains no more than threshold entries. NOINLINE to prevent
259 // inlining into steal_and_trim_queue.
260 ATTRIBUTE_FLATTEN NOINLINE
261 void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
262 ScannerTask task;
263 do {
264 while (_task_queue->pop_overflow(task)) {
265 if (!_task_queue->try_push_to_taskqueue(task)) {
266 dispatch_task(task);
267 }
268 }
269 while (_task_queue->pop_local(task, threshold)) {
270 dispatch_task(task);
271 }
272 } while (!_task_queue->overflow_empty());
273 }
274
275 ATTRIBUTE_FLATTEN
276 void G1ParScanThreadState::steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues) {
277 ScannerTask stolen_task;
278 while (task_queues->steal(_worker_id, stolen_task)) {
279 dispatch_task(stolen_task);
280 // Processing stolen task may have added tasks to our queue.
281 trim_queue();
282 }
283 }
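
// A sketch (not HotSpot code) of the steal-and-trim pattern above: a worker
// repeatedly steals one task from some other worker's queue, processes it, and then
// fully drains its own queue (which the stolen task may have refilled), stopping
// once stealing keeps failing. QueueSet, its task_type, steal(), process() and
// drain_own_queue() are assumed interfaces for illustration.
template <typename QueueSet, typename ProcessFn, typename DrainFn>
void steal_and_drain(QueueSet& queues, unsigned my_worker_id,
                     ProcessFn process, DrainFn drain_own_queue) {
  typename QueueSet::task_type stolen;
  while (queues.steal(my_worker_id, stolen)) {   // false once no work can be stolen
    process(stolen);                             // may push follow-up work locally
    drain_own_queue();                           // empty the local queue before stealing again
  }
}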
284
285 HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
286 size_t word_sz,
287 bool previous_plab_refill_failed,
288 uint node_index) {
289
290 assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());
291
292 // Right now we only have two types of regions (young / old) so
293 // let's keep the logic here simple. We can generalize it when necessary.
294 if (dest->is_young()) {
295 bool plab_refill_in_old_failed = false;
296 HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
297 word_sz,
298 &plab_refill_in_old_failed,
299 node_index);
300 // Make sure that we won't attempt to copy any other objects out
301 // of a survivor region (given that apparently we cannot allocate
302 // any new ones) to avoid coming into this slow path again and again.
330 return region_attr;
331 }
332 }
333 return dest(region_attr);
334 }
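
// A hedged sketch (not the elided HotSpot code) of the fallback policy that the
// comment above describes: when an object headed for survivor space cannot get a
// PLAB, the copy is retried in old gen; if the survivor PLAB refill had already
// failed, the tenuring threshold is dropped to zero so no further objects are sent
// through this slow path, and a failed old-gen refill is remembered as "old gen
// full". All names below are illustrative, not the actual fields.
struct EvacFallbackState {
  unsigned tenuring_threshold = 7;   // illustrative starting value
  bool     old_gen_is_full    = false;
};

inline void note_survivor_fallback(EvacFallbackState& s,
                                   bool survivor_plab_refill_failed,
                                   bool old_plab_refill_failed) {
  if (survivor_plab_refill_failed) {
    s.tenuring_threshold = 0;        // stop copying anything else into survivor regions
  }
  if (old_plab_refill_failed) {
    s.old_gen_is_full = true;        // don't keep retrying old-gen PLAB refills either
  }
}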
335
336 void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
337 oop const old, size_t word_sz, uint age,
338 HeapWord * const obj_ptr, uint node_index) const {
339 PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
340 if (alloc_buf->contains(obj_ptr)) {
341 _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
342 dest_attr.type() == G1HeapRegionAttr::Old,
343 alloc_buf->word_sz() * HeapWordSize);
344 } else {
345 _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
346 dest_attr.type() == G1HeapRegionAttr::Old);
347 }
348 }
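
// A small sketch (not HotSpot code) of the distinction made above: an address that
// falls inside the current allocation buffer was a PLAB allocation, anything else
// must have been allocated directly in the destination region (outside any PLAB).
// The Buffer type below is an assumption standing in for PLAB.
#include <cstdint>

struct Buffer {
  std::uintptr_t bottom;
  std::uintptr_t end;
  bool contains(std::uintptr_t p) const { return p >= bottom && p < end; }
};

enum class PromotionKind { InNewPlab, OutsidePlab };

inline PromotionKind classify_promotion(const Buffer& buf, std::uintptr_t obj_addr) {
  return buf.contains(obj_addr) ? PromotionKind::InNewPlab : PromotionKind::OutsidePlab;
}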
349
350 NOINLINE
351 HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
352 oop old,
353 size_t word_sz,
354 uint age,
355 uint node_index) {
356 HeapWord* obj_ptr = NULL;
357 // Try slow-path allocation unless we're allocating old and old is already full.
358 if (!(dest_attr->is_old() && _old_gen_is_full)) {
359 bool plab_refill_failed = false;
360 obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
361 word_sz,
362 &plab_refill_failed,
363 node_index);
364 if (obj_ptr == NULL) {
365 obj_ptr = allocate_in_next_plab(dest_attr,
366 word_sz,
367 plab_refill_failed,
368 node_index);
369 }
370 }
371 if (obj_ptr != NULL) {
372 update_numa_stats(node_index);
373 if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
374 // The events are checked individually as part of the actual commit
375 report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
376 }
377 }
378 return obj_ptr;
379 }
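
// A sketch (not HotSpot code) of the slow-path cascade in allocate_copy_slow()
// above: first try to allocate directly or by refilling a PLAB for the preferred
// destination, then fall back to the next destination (e.g. survivor -> old), and
// return NULL only when both fail, leaving evacuation failure to the caller.
// The two allocator callables are assumptions standing in for the PLAB allocator.
#include <cstddef>

template <typename PreferredAllocFn, typename FallbackAllocFn>
void* allocate_with_fallback(PreferredAllocFn try_preferred,
                             FallbackAllocFn try_next_destination,
                             std::size_t word_sz) {
  void* p = try_preferred(word_sz);
  if (p == nullptr) {
    p = try_next_destination(word_sz);  // may also change the recorded destination
  }
  return p;                             // nullptr means the caller must handle evacuation failure
}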
380
381 NOINLINE
382 void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
383 HeapWord* obj_ptr,
384 size_t word_sz,
385 uint node_index) {
386 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
387 }
388
389 // Private inline function, for direct internal use and providing the
390 // implementation of the public not-inline function.
391 oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
392 oop const old,
393 markWord const old_mark) {
394 assert(region_attr.is_in_cset(),
395 "Unexpected region attr type: %s", region_attr.get_type_str());
396
397 const size_t word_sz = old->size();
398
399 uint age = 0;
400 G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
401 HeapRegion* const from_region = _g1h->heap_region_containing(old);
402 uint node_index = from_region->node_index();
403
404 HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
405
406 // PLAB allocations should succeed most of the time, so we'll
407 // normally check against NULL once and that's it.
408 if (obj_ptr == NULL) {
409 obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
410 if (obj_ptr == NULL) {
411 // This will either forward-to-self, or detect that someone else has
412 // installed a forwarding pointer.
413 return handle_evacuation_failure_par(old, old_mark);
414 }
415 }
416
417 assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
418 assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
419
420 #ifndef PRODUCT
421 // Should this evacuation fail?
422 if (_g1h->evacuation_should_fail()) {
423 // Doing this after all the allocation attempts also exercises the
424 // undo_allocation() method.
425 undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
426 return handle_evacuation_failure_par(old, old_mark);
427 }
428 #endif // !PRODUCT
429
430 // We're going to allocate linearly, so might as well prefetch ahead.
431 Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
432
433 const oop obj = oop(obj_ptr);
434 const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
435 if (forward_ptr == NULL) {
436 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
437
438 {
439 const uint young_index = from_region->young_index_in_cset();
440 assert((from_region->is_young() && young_index > 0) ||
441 (!from_region->is_young() && young_index == 0), "invariant" );
442 _surviving_young_words[young_index] += word_sz;
443 }
444
445 if (dest_attr.is_young()) {
446 if (age < markWord::max_age) {
447 age++;
448 }
449 if (old_mark.has_displaced_mark_helper()) {
450 // In this case, we have to install the mark word first,
451 // otherwise obj looks to be forwarded (the old mark word,
452 // which contains the forward pointer, was copied)
453 obj->set_mark_raw(old_mark);
454 markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
455 old_mark.set_displaced_mark_helper(new_mark);
456 } else {
457 obj->set_mark_raw(old_mark.set_age(age));
458 }
459 _age_table.add(age, word_sz);
460 } else {
461 obj->set_mark_raw(old_mark);
462 }
463
464 if (G1StringDedup::is_enabled()) {
465 const bool is_from_young = region_attr.is_young();
466 const bool is_to_young = dest_attr.is_young();
467 assert(is_from_young == from_region->is_young(),
468 "sanity");
469 assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
470 "sanity");
471 G1StringDedup::enqueue_from_evacuation(is_from_young,
472 is_to_young,
473 _worker_id,
474 obj);
475 }
476
477 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
478 // We keep track of the next start index in the length field of
479 // the to-space object. The actual length can be found in the
480 // length field of the from-space object.
481 arrayOop(obj)->set_length(0);
482 do_partial_array(PartialArrayScanTask(old));
483 } else {
484 G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
485 obj->oop_iterate_backwards(&_scanner);
486 }
487 return obj;
488 } else {
489 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
490 return forward_ptr;
491 }
492 }
493
494 // Public not-inline entry point.
495 ATTRIBUTE_FLATTEN
496 oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr region_attr,
497 oop old,
498 markWord old_mark) {
499 return do_copy_to_survivor_space(region_attr, old, old_mark);
500 }
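
// A self-contained sketch (not HotSpot code) of the claim protocol used in
// do_copy_to_survivor_space() above: each competing worker first reserves space for
// its own copy, then tries to publish a forwarding pointer with a single CAS on the
// from-space header. The CAS winner performs the copy; losers release their
// reservation (undo_allocation) and use the winner's copy instead.
#include <atomic>
#include <cstdint>

struct SketchHeader { std::atomic<std::uintptr_t> word; };

// Returns the address every thread should use for the evacuated object.
template <typename CopyFn, typename UndoFn>
std::uintptr_t claim_and_copy(SketchHeader& from, std::uintptr_t original_mark,
                              std::uintptr_t my_copy, CopyFn do_copy, UndoFn undo) {
  std::uintptr_t expected = original_mark;
  if (from.word.compare_exchange_strong(expected, my_copy, std::memory_order_relaxed)) {
    do_copy(my_copy);   // we won the race: copy the object into our reservation
    return my_copy;
  }
  undo(my_copy);        // we lost: give the reserved space back to the PLAB
  return expected;      // expected now holds the winner's forwarding pointer
}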
501
502 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
503 assert(worker_id < _n_workers, "out of bounds access");
504 if (_states[worker_id] == NULL) {
505 _states[worker_id] =
506 new G1ParScanThreadState(_g1h, _rdcqs, worker_id, _young_cset_length, _optional_cset_length);
507 }
508 return _states[worker_id];
509 }
510
511 const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
512 assert(_flushed, "thread local state from the per thread states should have been flushed");
513 return _surviving_young_words_total;
514 }
515
516 void G1ParScanThreadStateSet::flush() {
517 assert(!_flushed, "thread local state from the per thread states should be flushed once");
518
519 for (uint worker_id = 0; worker_id < _n_workers; ++worker_id) {
520 G1ParScanThreadState* pss = _states[worker_id];
521
537
538 delete pss;
539 _states[worker_id] = NULL;
540 }
541 _flushed = true;
542 }
543
544 void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
545 for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
546 G1ParScanThreadState* pss = _states[worker_index];
547
548 if (pss == NULL) {
549 continue;
550 }
551
552 size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
553 _g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
554 }
555 }
556
557 NOINLINE
558 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
559 assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
560
561 oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
562 if (forward_ptr == NULL) {
563 // Forward-to-self succeeded. We are the "owner" of the object.
564 HeapRegion* r = _g1h->heap_region_containing(old);
565
566 if (!r->evacuation_failed()) {
567 r->set_evacuation_failed(true);
568 _g1h->hr_printer()->evac_failure(r);
569 }
570
571 _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
572
573 G1ScanInYoungSetter x(&_scanner, r->is_young());
574 old->oop_iterate_backwards(&_scanner);
575
576 return old;
577 } else {
578 // Forward-to-self failed. Either someone else managed to allocate
579 // space for this object (old != forward_ptr) or they beat us in
580 // self-forwarding it (old == forward_ptr).
581 assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
582 "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
583 "should not be in the CSet",
584 p2i(old), p2i(forward_ptr));
585 return forward_ptr;
586 }
587 }
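
// A sketch (not HotSpot code) of why the original mark word is preserved above:
// installing the self-forwarding pointer overwrites the header, so each worker
// stashes (object, original mark) pairs (typically only for non-trivial marks) and
// replays them after the pause to restore the headers of objects that failed to
// evacuate. The container below is an assumption for illustration.
#include <cstdint>
#include <utility>
#include <vector>

struct PreservedMarksSketch {
  std::vector<std::pair<std::uintptr_t, std::uintptr_t>> saved;  // (object, mark word)

  void preserve(std::uintptr_t obj, std::uintptr_t mark) {
    saved.emplace_back(obj, mark);
  }

  template <typename RestoreFn>
  void restore_all(RestoreFn restore_header) {
    for (const auto& p : saved) {
      restore_header(p.first, p.second);   // write the saved mark back into the object
    }
    saved.clear();
  }
};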
588
589 void G1ParScanThreadState::initialize_numa_stats() {
590 if (_numa->is_enabled()) {
591 LogTarget(Info, gc, heap, numa) lt;
592
593 if (lt.is_enabled()) {
594 uint num_nodes = _numa->num_active_nodes();
595 // Record only if there are multiple active nodes.
596 _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
597 memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
598 }
599 }
600 }
601
602 void G1ParScanThreadState::flush_numa_stats() {
603 if (_obj_alloc_stat != NULL) {
604 uint node_index = _numa->index_of_current_thread();
605 _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
606 }
607 }
608
609 void G1ParScanThreadState::update_numa_stats(uint node_index) {
610 if (_obj_alloc_stat != NULL) {
611 _obj_alloc_stat[node_index]++;
612 }
613 }
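
// A sketch (not HotSpot code) of the per-node bookkeeping above: each worker keeps
// a plain array of per-NUMA-node allocation counts and merges it into the global
// statistics exactly once, when the worker's state is flushed.
#include <cstddef>
#include <vector>

struct NumaAllocStatsSketch {
  std::vector<std::size_t> per_node;                 // one counter per active node

  explicit NumaAllocStatsSketch(std::size_t num_nodes) : per_node(num_nodes, 0) {}

  void record_alloc(std::size_t node_index) { ++per_node[node_index]; }

  template <typename MergeFn>
  void flush(MergeFn merge_into_global) { merge_into_global(per_node); }
};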
614
615 G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
616 G1RedirtyCardsQueueSet* rdcqs,
617 uint n_workers,
618 size_t young_cset_length,
619 size_t optional_cset_length) :
620 _g1h(g1h),
621 _rdcqs(rdcqs),
622 _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
623 _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC)),
624 _young_cset_length(young_cset_length),
625 _optional_cset_length(optional_cset_length),
626 _n_workers(n_workers),
627 _flushed(false) {
628 for (uint i = 0; i < n_workers; ++i) {
629 _states[i] = NULL;
630 }
631 memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t));
632 }
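
// A sketch (not HotSpot code) of the indexing convention behind the "+ 1" above:
// young regions in the collection set get indices 1..young_cset_length (see the
// young_index asserts earlier), and index 0 is reserved for objects copied out of
// non-young regions, so the per-worker and total arrays need one extra slot.
#include <cstddef>
#include <vector>

struct SurvivingWordsSketch {
  std::vector<std::size_t> words;   // slot 0: non-young source, slots 1..N: young regions

  explicit SurvivingWordsSketch(std::size_t young_cset_length)
    : words(young_cset_length + 1, 0) {}

  void add(std::size_t young_index_in_cset, std::size_t word_sz) {
    words[young_index_in_cset] += word_sz;  // index 0 for objects from non-young regions
  }
};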
633
634 G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {