160 FREE_C_HEAP_ARRAY(bool, _contains);
161 }
162
163 void reset() {
164 _cur_idx = 0;
165 ::memset(_contains, false, _max_regions * sizeof(bool));
166 }
167
168 uint size() const { return _cur_idx; }
169
// Returns the region index stored at position idx; idx must be below size().
uint at(uint idx) const {
  assert(idx < _cur_idx, "Index %u beyond valid regions", idx);
  return _buffer[idx];
}
174
// Thread-safely records the given region as dirty exactly once: a racy
// read filters out already-recorded regions cheaply, then a CAS decides
// which thread (if any raced here) gets to append it to the buffer.
void add_dirty_region(uint region) {
  // Fast path: some thread already marked this region; nothing to do.
  if (_contains[region]) {
    return;
  }

  // Legacy HotSpot Atomic argument order: cmpxchg(exchange_value, dest,
  // compare_value). It returns the previous value, so "== false" means this
  // thread performed the false->true transition and owns the insertion.
  bool marked_as_dirty = Atomic::cmpxchg(true, &_contains[region], false) == false;
  if (marked_as_dirty) {
    // Claim a unique buffer slot; Atomic::add returns the new value, so
    // subtracting 1 yields the index this thread may write.
    uint allocated = Atomic::add(1u, &_cur_idx) - 1;
    _buffer[allocated] = region;
  }
}
186
187 // Creates the union of this and the other G1DirtyRegions.
188 void merge(const G1DirtyRegions* other) {
189 for (uint i = 0; i < other->size(); i++) {
190 uint region = other->at(i);
191 if (!_contains[region]) {
192 _buffer[_cur_idx++] = region;
193 _contains[region] = true;
194 }
195 }
196 }
197 };
198
199 // Creates a snapshot of the current _top values at the start of collection to
200 // filter out card marks that we do not want to scan.
201 class G1ResetScanTopClosure : public HeapRegionClosure {
202 G1RemSetScanState* _scan_state;
238
239 public:
// Worker task that clears the card table of the given set of dirty regions,
// handing out work to workers in chunks of chunk_length regions.
G1ClearCardTableTask(G1CollectedHeap* g1h,
                     G1DirtyRegions* regions,
                     uint chunk_length,
                     G1RemSetScanState* scan_state) :
  AbstractGangTask("G1 Clear Card Table Task"),
  _g1h(g1h),
  _regions(regions),
  _chunk_length(chunk_length),
  _cur_dirty_regions(0),  // Shared claim counter used by work() to hand out chunks.
  _scan_state(scan_state) {

  assert(chunk_length > 0, "must be");  // A zero chunk length would make work() claim nothing and spin.
}
253
254 static uint chunk_size() { return M; }
255
// Repeatedly claims chunks of _chunk_length dirty regions via an atomic
// counter and clears the card table of every non-survivor region claimed.
void work(uint worker_id) {
  while (_cur_dirty_regions < _regions->size()) {
    // Legacy Atomic::add(value, dest) returns the updated counter, so
    // subtracting _chunk_length yields the first index of our chunk.
    uint next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
    // The last chunk may be cut short at the end of the region list.
    uint max = MIN2(next + _chunk_length, _regions->size());

    for (uint i = next; i < max; i++) {
      HeapRegion* r = _g1h->region_at(_regions->at(i));
      // Survivor regions keep their card marks; presumably they are
      // handled elsewhere -- TODO confirm against callers.
      if (!r->is_survivor()) {
        r->clear_cardtable();
      }
    }
  }
}
269 };
270
271 // Clear the card table of "dirty" regions.
272 void clear_card_table(WorkGang* workers) {
273 uint num_regions = _all_dirty_regions->size();
274
275 if (num_regions == 0) {
276 return;
277 }
278
420 uint const start_pos = num_regions * worker_id / max_workers;
421 uint cur = start_pos;
422
423 do {
424 bool result = cl->do_heap_region(g1h->region_at(_next_dirty_regions->at(cur)));
425 guarantee(!result, "Not allowed to ask for early termination.");
426 cur++;
427 if (cur == _next_dirty_regions->size()) {
428 cur = 0;
429 }
430 } while (cur != start_pos);
431 }
432
// Attempt to claim the given region in the collection set for iteration. Returns true
// if this call caused the transition from Unclaimed to Claimed.
inline bool claim_collection_set_region(uint region) {
  assert(region < _max_regions, "Tried to access invalid region %u", region);
  // Racy fast check avoids the CAS when the region is clearly claimed already.
  if (_collection_set_iter_state[region]) {
    return false;
  }
  // Legacy cmpxchg(exchange, dest, compare) returns the previous value:
  // false means this thread performed the Unclaimed->Claimed transition.
  return !Atomic::cmpxchg(true, &_collection_set_iter_state[region], false);
}
442
// Returns whether there are still unclaimed cards in the given region, i.e.
// the claim counter has not yet reached CardsPerRegion. NOTE(review): this is
// a plain read of a counter updated atomically elsewhere, so a true result
// may be stale by the time the caller acts on it.
bool has_cards_to_scan(uint region) {
  assert(region < _max_regions, "Tried to access invalid region %u", region);
  return _card_table_scan_state[region] < HeapRegion::CardsPerRegion;
}
447
// Atomically claims the next increment cards of the given region for
// scanning. Returns the counter value before the claim, i.e. the start of
// the claimed range (Atomic::add returns the new value).
uint claim_cards_to_scan(uint region, uint increment) {
  assert(region < _max_regions, "Tried to access invalid region %u", region);
  return Atomic::add(increment, &_card_table_scan_state[region]) - increment;
}
452
// Records a region whose cards need scanning in the next-dirty set. The
// debug-only check enforces the caller contract: only old/humongous/archive
// regions outside the collection set may be added here.
void add_dirty_region(uint const region) {
#ifdef ASSERT
  HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
  assert(!hr->in_collection_set() && hr->is_old_or_humongous_or_archive(),
         "Region %u is not suitable for scanning, is %sin collection set or %s",
         hr->hrm_index(), hr->in_collection_set() ? "" : "not ", hr->get_short_type_str());
#endif
  _next_dirty_regions->add_dirty_region(region);
}
462
463 void add_all_dirty_region(uint region) {
464 #ifdef ASSERT
465 HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
466 assert(hr->in_collection_set(),
467 "Only add young regions to all dirty regions directly but %u is %s",
468 hr->hrm_index(), hr->get_short_type_str());
469 #endif
470 _all_dirty_regions->add_dirty_region(region);
1120 G1BufferNodeList buffers = dcqs.take_all_completed_buffers();
1121 if (buffers._entry_count != 0) {
1122 _dirty_card_buffers.prepend(*buffers._head, *buffers._tail);
1123 }
1124 }
1125 }
1126
1127 virtual void work(uint worker_id) {
1128 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1129 G1GCPhaseTimes* p = g1h->phase_times();
1130
1131 G1GCPhaseTimes::GCParPhases merge_remset_phase = _initial_evacuation ?
1132 G1GCPhaseTimes::MergeRS :
1133 G1GCPhaseTimes::OptMergeRS;
1134
1135 // We schedule flushing the remembered sets of humongous fast reclaim candidates
1136 // onto the card table first to allow the remaining parallelized tasks hide it.
1137 if (_initial_evacuation &&
1138 p->fast_reclaim_humongous_candidates() > 0 &&
1139 !_fast_reclaim_handled &&
1140 !Atomic::cmpxchg(true, &_fast_reclaim_handled, false)) {
1141
1142 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);
1143
1144 G1FlushHumongousCandidateRemSets cl(_scan_state);
1145 g1h->heap_region_iterate(&cl);
1146
1147 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1148 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1149 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
1150 }
1151
1152 // Merge remembered sets of current candidates.
1153 {
1154 G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, _initial_evacuation /* must_record */);
1155 G1MergeCardSetClosure cl(_scan_state);
1156 g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);
1157
1158 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1159 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1160 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
|
160 FREE_C_HEAP_ARRAY(bool, _contains);
161 }
162
163 void reset() {
164 _cur_idx = 0;
165 ::memset(_contains, false, _max_regions * sizeof(bool));
166 }
167
168 uint size() const { return _cur_idx; }
169
// Returns the region index stored at position idx; idx must be below size().
uint at(uint idx) const {
  assert(idx < _cur_idx, "Index %u beyond valid regions", idx);
  return _buffer[idx];
}
174
// Thread-safely records the given region as dirty exactly once: a racy
// read filters out already-recorded regions cheaply, then a CAS decides
// which thread (if any raced here) gets to append it to the buffer.
void add_dirty_region(uint region) {
  // Fast path: some thread already marked this region; nothing to do.
  if (_contains[region]) {
    return;
  }

  // cmpxchg(dest, compare_value, exchange_value) returns the previous value,
  // so "== false" means this thread performed the false->true transition
  // and owns the insertion.
  bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
  if (marked_as_dirty) {
    // Claim a unique buffer slot; Atomic::add returns the new value, so
    // subtracting 1 yields the index this thread may write.
    uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
    _buffer[allocated] = region;
  }
}
186
187 // Creates the union of this and the other G1DirtyRegions.
188 void merge(const G1DirtyRegions* other) {
189 for (uint i = 0; i < other->size(); i++) {
190 uint region = other->at(i);
191 if (!_contains[region]) {
192 _buffer[_cur_idx++] = region;
193 _contains[region] = true;
194 }
195 }
196 }
197 };
198
199 // Creates a snapshot of the current _top values at the start of collection to
200 // filter out card marks that we do not want to scan.
201 class G1ResetScanTopClosure : public HeapRegionClosure {
202 G1RemSetScanState* _scan_state;
238
239 public:
// Worker task that clears the card table of the given set of dirty regions,
// handing out work to workers in chunks of chunk_length regions.
G1ClearCardTableTask(G1CollectedHeap* g1h,
                     G1DirtyRegions* regions,
                     uint chunk_length,
                     G1RemSetScanState* scan_state) :
  AbstractGangTask("G1 Clear Card Table Task"),
  _g1h(g1h),
  _regions(regions),
  _chunk_length(chunk_length),
  _cur_dirty_regions(0),  // Shared claim counter used by work() to hand out chunks.
  _scan_state(scan_state) {

  assert(chunk_length > 0, "must be");  // A zero chunk length would make work() claim nothing and spin.
}
253
254 static uint chunk_size() { return M; }
255
// Repeatedly claims chunks of _chunk_length dirty regions via an atomic
// counter and clears the card table of every non-survivor region claimed.
void work(uint worker_id) {
  while (_cur_dirty_regions < _regions->size()) {
    // Atomic::add(dest, value) returns the updated counter, so subtracting
    // _chunk_length yields the first index of our chunk.
    uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
    // The last chunk may be cut short at the end of the region list.
    uint max = MIN2(next + _chunk_length, _regions->size());

    for (uint i = next; i < max; i++) {
      HeapRegion* r = _g1h->region_at(_regions->at(i));
      // Survivor regions keep their card marks; presumably they are
      // handled elsewhere -- TODO confirm against callers.
      if (!r->is_survivor()) {
        r->clear_cardtable();
      }
    }
  }
}
269 };
270
271 // Clear the card table of "dirty" regions.
272 void clear_card_table(WorkGang* workers) {
273 uint num_regions = _all_dirty_regions->size();
274
275 if (num_regions == 0) {
276 return;
277 }
278
420 uint const start_pos = num_regions * worker_id / max_workers;
421 uint cur = start_pos;
422
423 do {
424 bool result = cl->do_heap_region(g1h->region_at(_next_dirty_regions->at(cur)));
425 guarantee(!result, "Not allowed to ask for early termination.");
426 cur++;
427 if (cur == _next_dirty_regions->size()) {
428 cur = 0;
429 }
430 } while (cur != start_pos);
431 }
432
// Attempt to claim the given region in the collection set for iteration. Returns true
// if this call caused the transition from Unclaimed to Claimed.
inline bool claim_collection_set_region(uint region) {
  assert(region < _max_regions, "Tried to access invalid region %u", region);
  // Racy fast check avoids the CAS when the region is clearly claimed already.
  if (_collection_set_iter_state[region]) {
    return false;
  }
  // cmpxchg(dest, compare, exchange) returns the previous value: false means
  // this thread performed the Unclaimed->Claimed transition.
  return !Atomic::cmpxchg(&_collection_set_iter_state[region], false, true);
}
442
// Returns whether there are still unclaimed cards in the given region, i.e.
// the claim counter has not yet reached CardsPerRegion. NOTE(review): this is
// a plain read of a counter updated atomically elsewhere, so a true result
// may be stale by the time the caller acts on it.
bool has_cards_to_scan(uint region) {
  assert(region < _max_regions, "Tried to access invalid region %u", region);
  return _card_table_scan_state[region] < HeapRegion::CardsPerRegion;
}
447
// Atomically claims the next increment cards of the given region for
// scanning. Returns the counter value before the claim, i.e. the start of
// the claimed range (Atomic::add returns the new value).
uint claim_cards_to_scan(uint region, uint increment) {
  assert(region < _max_regions, "Tried to access invalid region %u", region);
  return Atomic::add(&_card_table_scan_state[region], increment) - increment;
}
452
// Records a region whose cards need scanning in the next-dirty set. The
// debug-only check enforces the caller contract: only old/humongous/archive
// regions outside the collection set may be added here.
void add_dirty_region(uint const region) {
#ifdef ASSERT
  HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
  assert(!hr->in_collection_set() && hr->is_old_or_humongous_or_archive(),
         "Region %u is not suitable for scanning, is %sin collection set or %s",
         hr->hrm_index(), hr->in_collection_set() ? "" : "not ", hr->get_short_type_str());
#endif
  _next_dirty_regions->add_dirty_region(region);
}
462
463 void add_all_dirty_region(uint region) {
464 #ifdef ASSERT
465 HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
466 assert(hr->in_collection_set(),
467 "Only add young regions to all dirty regions directly but %u is %s",
468 hr->hrm_index(), hr->get_short_type_str());
469 #endif
470 _all_dirty_regions->add_dirty_region(region);
1120 G1BufferNodeList buffers = dcqs.take_all_completed_buffers();
1121 if (buffers._entry_count != 0) {
1122 _dirty_card_buffers.prepend(*buffers._head, *buffers._tail);
1123 }
1124 }
1125 }
1126
1127 virtual void work(uint worker_id) {
1128 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1129 G1GCPhaseTimes* p = g1h->phase_times();
1130
1131 G1GCPhaseTimes::GCParPhases merge_remset_phase = _initial_evacuation ?
1132 G1GCPhaseTimes::MergeRS :
1133 G1GCPhaseTimes::OptMergeRS;
1134
1135 // We schedule flushing the remembered sets of humongous fast reclaim candidates
1136 // onto the card table first to allow the remaining parallelized tasks hide it.
1137 if (_initial_evacuation &&
1138 p->fast_reclaim_humongous_candidates() > 0 &&
1139 !_fast_reclaim_handled &&
1140 !Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) {
1141
1142 G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);
1143
1144 G1FlushHumongousCandidateRemSets cl(_scan_state);
1145 g1h->heap_region_iterate(&cl);
1146
1147 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1148 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1149 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
1150 }
1151
1152 // Merge remembered sets of current candidates.
1153 {
1154 G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, _initial_evacuation /* must_record */);
1155 G1MergeCardSetClosure cl(_scan_state);
1156 g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);
1157
1158 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
1159 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
1160 p->record_or_add_thread_work_item(merge_remset_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
|