49
50 double G1CollectionSet::predict_region_non_copy_time_ms(HeapRegion* hr) const {
51 return _policy->predict_region_non_copy_time_ms(hr, collector_state()->in_young_only_phase());
52 }
53
// Construct an empty, inactive collection set. The backing arrays
// (_collection_set_regions, _inc_collection_set_stats) are NULL here and
// presumably allocated later during initialization — TODO confirm against
// the initialize() path outside this chunk.
G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  _g1h(g1h),
  _policy(policy),
  _candidates(NULL),
  _eden_region_length(0),
  _survivor_region_length(0),
  _old_region_length(0),
  _collection_set_regions(NULL),
  _collection_set_cur_length(0),
  _collection_set_max_length(0),
  _num_optional_regions(0),
  _bytes_used_before(0),
  _recorded_rs_length(0),
  _inc_build_state(Inactive),  // incremental building starts out inactive
  _inc_part_start(0),
  _inc_collection_set_stats(NULL),
  _inc_bytes_used_before(0),
  _inc_recorded_rs_length(0),
  _inc_recorded_rs_length_diff(0),
  _inc_predicted_non_copy_time_ms(0.0),
  _inc_predicted_non_copy_time_ms_diff(0.0) {
}
76
// Release the region index array and the per-region incremental stats, then
// drop optional regions and any remaining collection set candidates.
G1CollectionSet::~G1CollectionSet() {
  FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
  FREE_C_HEAP_ARRAY(IncCollectionSetRegionStat, _inc_collection_set_stats);
  free_optional_regions();
  clear_candidates();
}
83
84 void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
85 uint survivor_cset_region_length) {
86 assert_at_safepoint_on_vm_thread();
87
88 _eden_region_length = eden_cset_region_length;
145 hr->set_index_in_opt_cset(_num_optional_regions++);
146 }
147
// Begin incrementally building the next collection set. Requires an empty
// collection set and inactive build state; resets all accumulated incremental
// prediction state and (debug only) the cached per-region stats.
void G1CollectionSet::start_incremental_building() {
  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
  assert(_inc_build_state == Inactive, "Precondition");
#ifdef ASSERT
  // Clear cached per-region stats so stale values cannot mask bugs in later
  // verification.
  for (size_t i = 0; i < _collection_set_max_length; i++) {
    _inc_collection_set_stats[i].reset();
  }
#endif

  _inc_bytes_used_before = 0;

  _inc_recorded_rs_length = 0;
  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_non_copy_time_ms = 0.0;
  _inc_predicted_non_copy_time_ms_diff = 0.0;

  // Presumably flips _inc_build_state to Active (required by
  // finalize_incremental_building()) — definition not in this chunk.
  update_incremental_marker();
}
166
// Fold the concurrently accumulated *_diff values into the main incremental
// totals and reset the diffs. Must run at a safepoint while building is
// active, so no sampling thread can race with the update.
void G1CollectionSet::finalize_incremental_building() {
  assert(_inc_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_recorded_rs_length and
  // _inc_predicted_non_copy_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diff fields. Here we add the diffs to
  // the "main" fields.

  _inc_recorded_rs_length += _inc_recorded_rs_length_diff;
  _inc_predicted_non_copy_time_ms += _inc_predicted_non_copy_time_ms_diff;

  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_non_copy_time_ms_diff = 0.0;
}
184
289 size_t rs_length = hr->rem_set()->occupied();
290 double non_copy_time = predict_region_non_copy_time_ms(hr);
291
292 // Cache the values we have added to the aggregated information
293 // in the heap region in case we have to remove this region from
294 // the incremental collection set, or it is updated by the
295 // rset sampling code
296
297 IncCollectionSetRegionStat* stat = &_inc_collection_set_stats[hr->hrm_index()];
298 stat->_rs_length = rs_length;
299 stat->_non_copy_time_ms = non_copy_time;
300
301 _inc_recorded_rs_length += rs_length;
302 _inc_predicted_non_copy_time_ms += non_copy_time;
303 _inc_bytes_used_before += hr->used();
304 }
305
306 assert(!hr->in_collection_set(), "invariant");
307 _g1h->register_young_region_with_region_attr(hr);
308
309 // We use UINT_MAX as "invalid" marker in verification.
310 assert(_collection_set_cur_length < (UINT_MAX - 1),
311 "Collection set is too large with " SIZE_FORMAT " entries", _collection_set_cur_length);
312 hr->set_young_index_in_cset((uint)_collection_set_cur_length + 1);
313
314 _collection_set_regions[_collection_set_cur_length] = hr->hrm_index();
315 // Concurrent readers must observe the store of the value in the array before an
316 // update to the length field.
317 OrderAccess::storestore();
318 _collection_set_cur_length++;
319 assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
320 }
321
// Add a single survivor region to the incremental collection set.
// Note: despite the plural name, exactly one region is added per call.
void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
  assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}
326
// Add a single eden region to the incremental collection set.
void G1CollectionSet::add_eden_region(HeapRegion* hr) {
  assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
}
331
332 #ifndef PRODUCT
// Verification closure: every region visited must be young (fatal guarantee)
// and should carry a surv_rate_group with a valid age. Violations of the
// latter are logged and recorded in _valid rather than aborting immediately,
// so the caller can dump the whole collection set before failing.
class G1VerifyYoungAgesClosure : public HeapRegionClosure {
public:
  bool _valid;  // false once any region fails a (non-fatal) check

  G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }

  virtual bool do_heap_region(HeapRegion* r) {
    guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());

    if (!r->has_surv_rate_group()) {
      log_error(gc, verify)("## encountered young region without surv_rate_group");
      _valid = false;
    }

    if (!r->has_valid_age_in_surv_rate()) {
      log_error(gc, verify)("## encountered invalid age in young region");
      _valid = false;
    }

    // Returning false continues iteration over the remaining regions.
    return false;
  }

  bool valid() const { return _valid; }
};
357
358 bool G1CollectionSet::verify_young_ages() {
359 assert_at_safepoint_on_vm_thread();
360
361 G1VerifyYoungAgesClosure cl;
362 iterate(&cl);
363
364 if (!cl.valid()) {
365 LogStreamHandle(Error, gc, verify) log;
366 print(&log);
367 }
368
542
543 #ifdef ASSERT
544 class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
545 private:
546 size_t _young_length;
547 uint* _heap_region_indices;
548 public:
549 G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
550 _heap_region_indices = NEW_C_HEAP_ARRAY(uint, young_length + 1, mtGC);
551 for (size_t i = 0; i < young_length + 1; i++) {
552 _heap_region_indices[i] = UINT_MAX;
553 }
554 }
555 ~G1VerifyYoungCSetIndicesClosure() {
556 FREE_C_HEAP_ARRAY(int, _heap_region_indices);
557 }
558
559 virtual bool do_heap_region(HeapRegion* r) {
560 const uint idx = r->young_index_in_cset();
561
562 assert(idx > 0, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
563 assert(idx <= _young_length, "Young cset index %u too large for region %u", idx, r->hrm_index());
564
565 assert(_heap_region_indices[idx] == UINT_MAX,
566 "Index %d used by multiple regions, first use by region %u, second by region %u",
567 idx, _heap_region_indices[idx], r->hrm_index());
568
569 _heap_region_indices[idx] = r->hrm_index();
570
571 return false;
572 }
573 };
574
575 void G1CollectionSet::verify_young_cset_indices() const {
576 assert_at_safepoint_on_vm_thread();
577
578 G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
579 iterate(&cl);
580 }
581 #endif
|
49
50 double G1CollectionSet::predict_region_non_copy_time_ms(HeapRegion* hr) const {
51 return _policy->predict_region_non_copy_time_ms(hr, collector_state()->in_young_only_phase());
52 }
53
// Construct an empty, inactive collection set. The backing arrays
// (_collection_set_regions, _inc_collection_set_stats) are NULL here and
// presumably allocated later during initialization — TODO confirm against
// the initialize() path outside this chunk.
G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  _g1h(g1h),
  _policy(policy),
  _candidates(NULL),
  _eden_region_length(0),
  _survivor_region_length(0),
  _old_region_length(0),
  _collection_set_regions(NULL),
  _collection_set_cur_length(0),
  _collection_set_max_length(0),
  _num_optional_regions(0),
  _bytes_used_before(0),
  _recorded_rs_length(0),
  _inc_build_state(Inactive),  // incremental building starts out inactive
  _inc_part_start(0),
  _cur_eden_young_idx(0),      // next eden young index; 0 reserved for non-eden
  _inc_collection_set_stats(NULL),
  _inc_bytes_used_before(0),
  _inc_recorded_rs_length(0),
  _inc_recorded_rs_length_diff(0),
  _inc_predicted_non_copy_time_ms(0.0),
  _inc_predicted_non_copy_time_ms_diff(0.0) {
}
77
// Release the region index array and the per-region incremental stats, then
// drop optional regions and any remaining collection set candidates.
G1CollectionSet::~G1CollectionSet() {
  FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
  FREE_C_HEAP_ARRAY(IncCollectionSetRegionStat, _inc_collection_set_stats);
  free_optional_regions();
  clear_candidates();
}
84
85 void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
86 uint survivor_cset_region_length) {
87 assert_at_safepoint_on_vm_thread();
88
89 _eden_region_length = eden_cset_region_length;
146 hr->set_index_in_opt_cset(_num_optional_regions++);
147 }
148
// Begin incrementally building the next collection set. Requires an empty
// collection set and inactive build state; resets all accumulated incremental
// prediction state, the eden young index counter, and (debug only) the cached
// per-region stats.
void G1CollectionSet::start_incremental_building() {
  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
  assert(_inc_build_state == Inactive, "Precondition");
#ifdef ASSERT
  // Clear cached per-region stats so stale values cannot mask bugs in later
  // verification.
  for (size_t i = 0; i < _collection_set_max_length; i++) {
    _inc_collection_set_stats[i].reset();
  }
#endif

  _inc_bytes_used_before = 0;

  _inc_recorded_rs_length = 0;
  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_non_copy_time_ms = 0.0;
  _inc_predicted_non_copy_time_ms_diff = 0.0;

  // Presumably flips _inc_build_state to Active (required by
  // finalize_incremental_building()) — definition not in this chunk.
  update_incremental_marker();
  // Restart eden young indices for the new collection set; the first eden
  // region added will get index 1 (see add_eden_region()).
  _cur_eden_young_idx = 0;
}
168
// Fold the concurrently accumulated *_diff values into the main incremental
// totals and reset the diffs. Must run at a safepoint while building is
// active, so no sampling thread can race with the update.
void G1CollectionSet::finalize_incremental_building() {
  assert(_inc_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_recorded_rs_length and
  // _inc_predicted_non_copy_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diff fields. Here we add the diffs to
  // the "main" fields.

  _inc_recorded_rs_length += _inc_recorded_rs_length_diff;
  _inc_predicted_non_copy_time_ms += _inc_predicted_non_copy_time_ms_diff;

  _inc_recorded_rs_length_diff = 0;
  _inc_predicted_non_copy_time_ms_diff = 0.0;
}
186
291 size_t rs_length = hr->rem_set()->occupied();
292 double non_copy_time = predict_region_non_copy_time_ms(hr);
293
294 // Cache the values we have added to the aggregated information
295 // in the heap region in case we have to remove this region from
296 // the incremental collection set, or it is updated by the
297 // rset sampling code
298
299 IncCollectionSetRegionStat* stat = &_inc_collection_set_stats[hr->hrm_index()];
300 stat->_rs_length = rs_length;
301 stat->_non_copy_time_ms = non_copy_time;
302
303 _inc_recorded_rs_length += rs_length;
304 _inc_predicted_non_copy_time_ms += non_copy_time;
305 _inc_bytes_used_before += hr->used();
306 }
307
308 assert(!hr->in_collection_set(), "invariant");
309 _g1h->register_young_region_with_region_attr(hr);
310
311 _collection_set_regions[_collection_set_cur_length] = hr->hrm_index();
312 // Concurrent readers must observe the store of the value in the array before an
313 // update to the length field.
314 OrderAccess::storestore();
315 _collection_set_cur_length++;
316 assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
317 }
318
// Add a single survivor region to the incremental collection set and bump the
// survivor count. Survivors are not assigned an eden young index here —
// index assignment only happens in add_eden_region().
// Note: despite the plural name, exactly one region is added per call.
void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
  assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
  add_young_region_common(hr);
  _survivor_region_length++;
}
324
325 void G1CollectionSet::add_eden_region(HeapRegion* hr) {
326 assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
327 assert(_collection_set_cur_length <= INT_MAX, "Collection set is too large with %d entries", (int)_collection_set_cur_length);
328
329 // Young index for eden regions should start at 1. 0 is reserved for other region types.
330 uint next_eden_young_idx = ++_cur_eden_young_idx;
331 assert(next_eden_young_idx > 0, "Invalid next young region idx %d", next_eden_young_idx);
332 hr->set_young_index_in_cset(next_eden_young_idx);
333
334 add_young_region_common(hr);
335 }
336
337 #ifndef PRODUCT
// Verification closure: every region visited must be young (fatal guarantee).
// Survivor regions are checked (via asserts) to have contents and no
// surv_rate_group; eden regions should carry a surv_rate_group with a valid
// age. Eden violations are logged and recorded in _valid rather than aborting
// immediately, so the caller can dump the whole collection set before failing.
class G1VerifyYoungAgesClosure : public HeapRegionClosure {
public:
  bool _valid;  // false once any eden region fails a (non-fatal) check

  G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }

  virtual bool do_heap_region(HeapRegion* r) {
    guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());

    if (r->is_survivor()) {
      // Survivors are exempt from the surv_rate_group/age checks below.
      assert(r->survivor_bytes() > 0, "## encountered survivor region without contents");
      assert(!r->has_surv_rate_group(), "## encountered surv_rate_group in survivor region");
      return false;
    }

    if (!r->has_surv_rate_group()) {
      log_error(gc, verify)("## encountered eden region without surv_rate_group");
      _valid = false;
    }

    if (!r->has_valid_age_in_surv_rate()) {
      log_error(gc, verify)("## encountered invalid age in eden region");
      _valid = false;
    }

    // Returning false continues iteration over the remaining regions.
    return false;
  }

  bool valid() const { return _valid; }
};
368
369 bool G1CollectionSet::verify_young_ages() {
370 assert_at_safepoint_on_vm_thread();
371
372 G1VerifyYoungAgesClosure cl;
373 iterate(&cl);
374
375 if (!cl.valid()) {
376 LogStreamHandle(Error, gc, verify) log;
377 print(&log);
378 }
379
553
554 #ifdef ASSERT
555 class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
556 private:
557 size_t _young_length;
558 uint* _heap_region_indices;
559 public:
560 G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
561 _heap_region_indices = NEW_C_HEAP_ARRAY(uint, young_length + 1, mtGC);
562 for (size_t i = 0; i < young_length + 1; i++) {
563 _heap_region_indices[i] = UINT_MAX;
564 }
565 }
566 ~G1VerifyYoungCSetIndicesClosure() {
567 FREE_C_HEAP_ARRAY(int, _heap_region_indices);
568 }
569
570 virtual bool do_heap_region(HeapRegion* r) {
571 const uint idx = r->young_index_in_cset();
572
573 assert(idx > 0 || r->survivor_bytes() > 0,
574 "Young index must be set for all regions not having survivor bytes but region %u.",
575 r->hrm_index());
576 assert(idx == 0 || idx <= _young_length, "Young cset index %u too large for region %u", idx, r->hrm_index());
577
578 if (idx != 0) {
579 assert(_heap_region_indices[idx] == UINT_MAX,
580 "Index %d used by multiple regions, first use by region %u, second by region %u",
581 idx, _heap_region_indices[idx], r->hrm_index());
582 _heap_region_indices[idx] = r->hrm_index();
583 }
584
585 return false;
586 }
587 };
588
589 void G1CollectionSet::verify_young_cset_indices() const {
590 assert_at_safepoint_on_vm_thread();
591
592 G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
593 iterate(&cl);
594 }
595 #endif
|