G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           uint worker_id,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _rdcq(rdcqs),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _surviving_survivor_words(0),
    _surviving_young_words_base(NULL),
    _surviving_young_words(NULL),
    _surviving_words_length(young_cset_length + 1),
    _old_gen_is_full(false),
    _num_optional_regions(optional_cset_length),
    _numa(g1h->numa()),
    _obj_alloc_stat(NULL)
{
  // We allocate one entry per young-gen region in the collection set, plus one
  // extra: entry 0 keeps track of surviving bytes for non-young regions.
  // We also add a few padding elements at the beginning and at the end in
  // an attempt to eliminate cache contention between worker threads.
  size_t array_length = PADDING_ELEM_NUM + _surviving_words_length + PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, _surviving_words_length * sizeof(size_t));
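  // Layout sketch of the allocation above (illustrative): the padding words
  // on either side are never written; they only separate this thread's
  // counters from neighboring C-heap allocations.
  //
  //   [pad x PADDING_ELEM_NUM][0: non-young][1..young_cset_length][pad x PADDING_ELEM_NUM]
  //                            ^-- _surviving_young_words points here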

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  // The destination for Young is used when an object has aged enough that it
  // needs to be moved to the next space.
  _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
  _dest[G1HeapRegionAttr::Old] = G1HeapRegionAttr::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

  _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];

  initialize_numa_stats();
}
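
// Note: everything allocated above (_surviving_young_words_base, the PLAB
// allocator, the root closures and the optional-region lists) is owned by
// this state and released in ~G1ParScanThreadState() below.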

size_t G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _rdcq.flush();
  flush_numa_stats();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->policy()->record_age_table(&_age_table);
  _g1h->policy()->record_surviving_survivor_words(_surviving_survivor_words);

  size_t sum = 0;
  for (uint i = 0; i < _surviving_words_length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
    sum += _surviving_young_words[i];
  }
  return sum;
}
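
// A minimal usage sketch (hypothetical driver code, not the actual JDK call
// sites; in the JDK these per-worker states are managed by a per-pause
// G1ParScanThreadStateSet): each worker gets one state, and after evacuation
// every state is flushed once into a shared, zero-initialized array.
//
//   size_t* totals = NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC);
//   memset(totals, 0, (young_cset_length + 1) * sizeof(size_t));
//   size_t surviving = 0;
//   for (uint i = 0; i < num_workers; i++) {
//     surviving += states[i]->flush(totals);  // accumulate per-region words
//   }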

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}
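
// ... (code between the two excerpts elided) ...

// Context note (an inference from the identifiers below -- obj_ptr, word_sz,
// dest_attr, from_region -- not stated in the excerpt): the following
// fragment appears to sit inside copy_to_survivor_space(), after a
// destination allocation has produced obj_ptr/word_sz for the object `old`.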

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also exercises the
    // undo_allocation() method.
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
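
  // The copy is claimed by atomically installing a forwarding pointer into
  // the old object's mark word. Only one worker's CAS can succeed; a loser
  // sees the winner's copy as a non-NULL forward_ptr and (in code elided
  // from this excerpt) gives its speculative allocation back to the PLAB.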
  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    const uint young_index = from_region->young_index_in_cset();

    assert(from_region->is_young() ||
           (!from_region->is_young() && young_index == 0),
           "region %u %s index %u",
           from_region->hrm_index(), from_region->get_short_type_str(), young_index);
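
    // A note on the condition below: eden objects start at age 0 and the age
    // field is only incremented on evacuation, so age > 0 in a young source
    // region should mean the object currently sits in a survivor region.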
    if (age > 0 && region_attr.is_young()) {
      _surviving_survivor_words += word_sz;
    }

    if (dest_attr.is_young()) {
      if (age < markWord::max_age) {
        age++;
      }
      if (old_mark.has_displaced_mark_helper()) {
        // In this case we have to install the mark word first, otherwise obj
        // would look like it is forwarded (the old mark word, which contains
        // the forwarding pointer, has been copied along with the object).
        obj->set_mark_raw(old_mark);
        markWord new_mark = old_mark.displaced_mark_helper().set_age(age);
        old_mark.set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark_raw(old_mark.set_age(age));
      }
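      // Record the new age; the aggregated age table (flushed via
      // record_age_table() in flush() above) feeds the policy's sizing of
      // the tenuring threshold.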
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark_raw(old_mark);
    }