95 return aligned_num_regions + max_waste;
96 }
97
98 public:
99 G1BuildCandidateArray(uint max_num_regions, uint chunk_size, uint num_workers) :
100 _max_size(required_array_size(max_num_regions, chunk_size, num_workers)),
101 _chunk_size(chunk_size),
102 _data(NEW_C_HEAP_ARRAY(HeapRegion*, _max_size, mtGC)),
103 _cur_claim_idx(0) {
104 for (uint i = 0; i < _max_size; i++) {
105 _data[i] = NULL;
106 }
107 }
108
  // Releases the backing array; FREE_C_HEAP_ARRAY pairs with the
  // NEW_C_HEAP_ARRAY allocation done in the constructor.
  ~G1BuildCandidateArray() {
    FREE_C_HEAP_ARRAY(HeapRegion*, _data);
  }
112
  // Claim a new chunk, returning its bounds [from, to[.
  // Thread-safe: workers race on _cur_claim_idx. Atomic::add returns the
  // post-increment value, so this caller exclusively owns the _chunk_size
  // slots immediately preceding `result`.
  void claim_chunk(uint& from, uint& to) {
    uint result = Atomic::add(_chunk_size, &_cur_claim_idx);
    // result - 1 is the last index of the claimed chunk and must lie inside
    // the array; required_array_size() sized _data so this cannot fire.
    assert(_max_size > result - 1,
           "Array too small, is %u should be %u with chunk size %u.",
           _max_size, result, _chunk_size);
    from = result - _chunk_size;
    to = result;
  }
122
  // Set element in array.
  // Needs no synchronization: each index belongs to exactly one claimed
  // chunk, so no two workers ever write the same slot (second assert
  // checks the slot is still unclaimed/NULL).
  void set(uint idx, HeapRegion* hr) {
    assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size);
    assert(_data[idx] == NULL, "Value must not have been set.");
    _data[idx] = hr;
  }
129
130 void sort_and_copy_into(HeapRegion** dest, uint num_regions) {
131 if (_cur_claim_idx == 0) {
132 return;
133 }
134 for (uint i = _cur_claim_idx; i < _max_size; i++) {
135 assert(_data[i] == NULL, "must be");
197 r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
198 }
199 return false;
200 }
201
    // Running count of regions this closure has recorded.
    uint regions_added() const { return _regions_added; }
    // Running total of reclaimable bytes across the recorded regions.
    size_t reclaimable_bytes_added() const { return _reclaimable_bytes_added; }
204 };
205
206 G1CollectedHeap* _g1h;
207 HeapRegionClaimer _hrclaimer;
208
209 uint volatile _num_regions_added;
210 size_t volatile _reclaimable_bytes_added;
211
212 G1BuildCandidateArray _result;
213
  // Atomically folds one worker's per-closure results into the task-wide
  // totals; called once per worker from work(), hence the atomic adds.
  void update_totals(uint num_regions, size_t reclaimable_bytes) {
    if (num_regions > 0) {
      // A region is only counted when it has something to reclaim.
      assert(reclaimable_bytes > 0, "invariant");
      Atomic::add(num_regions, &_num_regions_added);
      Atomic::add(reclaimable_bytes, &_reclaimable_bytes_added);
    } else {
      assert(reclaimable_bytes == 0, "invariant");
    }
  }
223
224 public:
  // Sets up the parallel candidate-building task: a claimer sized for
  // num_workers and a shared result array (see G1BuildCandidateArray for
  // how max_num_regions/chunk_size/num_workers size the array).
  G1BuildCandidateRegionsTask(uint max_num_regions, uint chunk_size, uint num_workers) :
    AbstractGangTask("G1 Build Candidate Regions"),
    _g1h(G1CollectedHeap::heap()),
    _hrclaimer(num_workers),
    _num_regions_added(0),
    _reclaimable_bytes_added(0),
    _result(max_num_regions, chunk_size, num_workers) { }
232
233 void work(uint worker_id) {
234 G1BuildCandidateRegionsClosure cl(&_result);
235 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
236 update_totals(cl.regions_added(), cl.reclaimable_bytes_added());
237 }
238
|
95 return aligned_num_regions + max_waste;
96 }
97
98 public:
99 G1BuildCandidateArray(uint max_num_regions, uint chunk_size, uint num_workers) :
100 _max_size(required_array_size(max_num_regions, chunk_size, num_workers)),
101 _chunk_size(chunk_size),
102 _data(NEW_C_HEAP_ARRAY(HeapRegion*, _max_size, mtGC)),
103 _cur_claim_idx(0) {
104 for (uint i = 0; i < _max_size; i++) {
105 _data[i] = NULL;
106 }
107 }
108
  // Releases the backing array; FREE_C_HEAP_ARRAY pairs with the
  // NEW_C_HEAP_ARRAY allocation done in the constructor.
  ~G1BuildCandidateArray() {
    FREE_C_HEAP_ARRAY(HeapRegion*, _data);
  }
112
  // Claim a new chunk, returning its bounds [from, to[.
  // Thread-safe: workers race on _cur_claim_idx. Atomic::add returns the
  // post-increment value, so this caller exclusively owns the _chunk_size
  // slots immediately preceding `result`.
  void claim_chunk(uint& from, uint& to) {
    uint result = Atomic::add(&_cur_claim_idx, _chunk_size);
    // result - 1 is the last index of the claimed chunk and must lie inside
    // the array; required_array_size() sized _data so this cannot fire.
    assert(_max_size > result - 1,
           "Array too small, is %u should be %u with chunk size %u.",
           _max_size, result, _chunk_size);
    from = result - _chunk_size;
    to = result;
  }
122
  // Set element in array.
  // Needs no synchronization: each index belongs to exactly one claimed
  // chunk, so no two workers ever write the same slot (second assert
  // checks the slot is still unclaimed/NULL).
  void set(uint idx, HeapRegion* hr) {
    assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size);
    assert(_data[idx] == NULL, "Value must not have been set.");
    _data[idx] = hr;
  }
129
130 void sort_and_copy_into(HeapRegion** dest, uint num_regions) {
131 if (_cur_claim_idx == 0) {
132 return;
133 }
134 for (uint i = _cur_claim_idx; i < _max_size; i++) {
135 assert(_data[i] == NULL, "must be");
197 r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
198 }
199 return false;
200 }
201
    // Running count of regions this closure has recorded.
    uint regions_added() const { return _regions_added; }
    // Running total of reclaimable bytes across the recorded regions.
    size_t reclaimable_bytes_added() const { return _reclaimable_bytes_added; }
204 };
205
206 G1CollectedHeap* _g1h;
207 HeapRegionClaimer _hrclaimer;
208
209 uint volatile _num_regions_added;
210 size_t volatile _reclaimable_bytes_added;
211
212 G1BuildCandidateArray _result;
213
  // Atomically folds one worker's per-closure results into the task-wide
  // totals; called once per worker from work(), hence the atomic adds.
  void update_totals(uint num_regions, size_t reclaimable_bytes) {
    if (num_regions > 0) {
      // A region is only counted when it has something to reclaim.
      assert(reclaimable_bytes > 0, "invariant");
      Atomic::add(&_num_regions_added, num_regions);
      Atomic::add(&_reclaimable_bytes_added, reclaimable_bytes);
    } else {
      assert(reclaimable_bytes == 0, "invariant");
    }
  }
223
224 public:
  // Sets up the parallel candidate-building task: a claimer sized for
  // num_workers and a shared result array (see G1BuildCandidateArray for
  // how max_num_regions/chunk_size/num_workers size the array).
  G1BuildCandidateRegionsTask(uint max_num_regions, uint chunk_size, uint num_workers) :
    AbstractGangTask("G1 Build Candidate Regions"),
    _g1h(G1CollectedHeap::heap()),
    _hrclaimer(num_workers),
    _num_regions_added(0),
    _reclaimable_bytes_added(0),
    _result(max_num_regions, chunk_size, num_workers) { }
232
233 void work(uint worker_id) {
234 G1BuildCandidateRegionsClosure cl(&_result);
235 _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
236 update_totals(cl.regions_added(), cl.reclaimable_bytes_added());
237 }
238
|