G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           G1RedirtyCardsQueueSet* rdcqs,
                                           uint worker_id,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _rdcq(rdcqs),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _worker_id(worker_id),
    _last_enqueued_card(SIZE_MAX),
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _old_gen_is_full(false),
    _num_optional_regions(optional_cset_length),
    _numa(g1h->numa()),
    _obj_alloc_stat(NULL)
{
  // We allocate one entry per young gen region in the collection set,
  // plus one: entry 0 keeps track of surviving bytes for non-young regions.
  // We also add a few padding elements at the beginning and at the end
  // in an attempt to eliminate cache contention (false sharing).
  size_t real_length = young_cset_length + 1;
  size_t array_length = PADDING_ELEM_NUM + real_length + PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, real_length * sizeof(size_t));

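  // Per-worker PLAB allocator; it manages the survivor and old PLABs
  // that this thread copies live objects into.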
  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  // The destination for Young is used when objects have aged enough that
  // they need to be moved to the next space.
  _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
  _dest[G1HeapRegionAttr::Old] = G1HeapRegionAttr::Old;

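  // Root closures that funnel references discovered during root scanning
  // into this per-thread state.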
  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

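  // One chunked list per optional collection set region, recording the
  // locations of oops that point into that region.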
  _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];

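  // Sets up the per-NUMA-node allocation counters (_obj_alloc_stat); they
  // stay NULL when NUMA statistics are not being collected.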
  initialize_numa_stats();
}

// Pass locally gathered statistics to global state.
void G1ParScanThreadState::flush(size_t* surviving_young_words) {
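  // Hand the locally buffered redirtied cards over to the global queue set.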
  _rdcq.flush();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->policy()->record_age_table(&_age_table);

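  // Accumulate this worker's per-region survivor totals into the caller's
  // array; index 0 is the non-young bucket (see the constructor).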
  uint length = _g1h->collection_set()->young_region_length() + 1;
  for (uint i = 0; i < length; i++) {
    surviving_young_words[i] += _surviving_young_words[i];
  }
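  // Publish the per-node object allocation counts to the global NUMA stats.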
  flush_numa_stats();
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
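  // NULL unless NUMA statistics were initialized; freeing NULL is a no-op.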
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}

size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}

size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}

#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
  oop p = RawAccess<>::oop_load(ref);
  assert(_g1h->is_in_g1_reserved(p),
         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  return true;
}

// ...

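  // Prefer to allocate the copy on the same NUMA node as the region the
  // object is being copied from.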
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  uint node_index = from_region->node_index();

  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed, node_index);
    if (obj_ptr == NULL) {
      assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str());
      obj_ptr = allocate_in_next_plab(&dest_attr, word_sz, plab_refill_failed, node_index);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
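    // Record the successful slow-path allocation in the per-node statistics.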
    update_numa_stats(node_index);

    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit.
      report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
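  // evacuation_should_fail() is a test hook controlled by the
  // G1EvacuationFailureALot develop flag.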
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also exercises the
    // undo_allocation() method.
    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.