      return;
    }

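    // A reference to an object in the same region needs no remembered set
    // entry: G1 remembered sets only track cross-region references.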
    if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) {
      return;
    }
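    // Only enqueue a card if it differs from the last one enqueued: successive
    // fields of the same object usually map to the same card, and each card
    // only needs to be redirtied once.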
    size_t card_index = _ct->index_for(p);
    if (card_index != _last_enqueued_card) {
      _rdcq->enqueue(_ct->byte_for_index(card_index));
      _last_enqueued_card = card_index;
    }
  }
};

class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _marked_bytes;
  UpdateLogBuffersDeferred* _log_buffer_cl;
  bool _during_concurrent_start;
  uint _worker_id;
  HeapWord* _last_forwarded_object_end;

public:
  RemoveSelfForwardPtrObjClosure(HeapRegion* hr,
                                 UpdateLogBuffersDeferred* log_buffer_cl,
                                 bool during_concurrent_start,
                                 uint worker_id) :
    _g1h(G1CollectedHeap::heap()),
    _cm(_g1h->concurrent_mark()),
    _hr(hr),
    _marked_bytes(0),
    _log_buffer_cl(log_buffer_cl),
    _during_concurrent_start(during_concurrent_start),
    _worker_id(worker_id),
    _last_forwarded_object_end(hr->bottom()) { }

  size_t marked_bytes() { return _marked_bytes; }

  // Iterate over the live objects in the region to find self-forwarded objects
  // that need to be kept live. We need to update the remembered sets of these
  // objects, and further update the BOT and the marking information.
  // The remaining heap contents can be coalesced and overwritten with dummy
  // objects, as they are either already dead or have been evacuated (and are
  // therefore unreferenced now, i.e. dead too).
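  // For illustration: with S = self-forwarded (live), D = dead and
  // E = evacuated (so its old location is dead too), a failed region laid
  // out as
  //   bottom | S1 | D | E | S2 | ... top
  // is rewritten to
  //   bottom | S1 | filler | S2 | filler ... top
  // where each gap between self-forwarded objects is covered by filler
  // objects, with the BOT and the prev bitmap updated accordingly.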
  void do_object(oop obj) {
    HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
    assert(_hr->is_in(obj_addr), "sanity");

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.
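      // (An object that could not be copied during evacuation had a
      // forwarding pointer to itself installed, after saving its original
      // mark word, if needed, in the PreservedMarks stacks.)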

      zap_dead_objects(_last_forwarded_object_end, obj_addr);
      // We consider all objects that we find self-forwarded to be
      // live. We update the prev marking info so that they are all
      // under PTAMS and explicitly marked.
      if (!_cm->is_marked_in_prev_bitmap(obj)) {
        _cm->mark_in_prev_bitmap(obj);
      }
      if (_during_concurrent_start) {
        // For the next marking info we'll only mark the self-forwarded
        // objects explicitly if we are in a concurrent start pause
        // (since, normally, we only mark objects pointed to by roots
        // if we succeed in copying them). By marking all self-forwarded
        // objects we ensure that we mark any that are still pointed to
        // by roots. During concurrent marking, and after concurrent
        // start, we don't need to mark any objects explicitly and all
        // objects in the CSet are considered (implicitly) live. So, we
        // won't mark them explicitly and we'll leave them over NTAMS.
        _cm->mark_in_next_bitmap(_worker_id, _hr, obj);
      }
      size_t obj_size = obj->size();

      _marked_bytes += (obj_size * HeapWordSize);
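      // Reset the mark word that the self-forwarding pointer overwrote;
      // objects with a non-default original mark had it saved via
      // PreservedMarks and will have it restored later.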
      PreservedMarks::init_forwarded_mark(obj);

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards on the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_log_buffer_cl);

      HeapWord* obj_end = obj_addr + obj_size;
      _last_forwarded_object_end = obj_end;
      _hr->cross_threshold(obj_addr, obj_end);
    }
  }

  // Fill the memory area from start to end with filler objects, and update
  // the BOT and the prev mark bitmap accordingly. The gap content is
  // considered dead.
  void zap_dead_objects(HeapWord* start, HeapWord* end) {
    if (start == end) {
      return;
    }

    size_t gap_size = pointer_delta(end, start);
    MemRegion mr(start, gap_size);
    if (gap_size >= CollectedHeap::min_fill_size()) {
      CollectedHeap::fill_with_objects(start, gap_size);

      HeapWord* end_first_obj = start + cast_to_oop(start)->size();
      _hr->cross_threshold(start, end_first_obj);
      // fill_with_objects() may have created multiple (i.e. two) objects,
      // as the maximum fill object size is half a region. After updating
      // the BOT for the first object, also update the BOT for the second
      // object to make the BOT complete.
      if (end_first_obj != end) {
        _hr->cross_threshold(end_first_obj, end);
      }
    }
    _cm->clear_range_in_prev_bitmap(mr);
  }

  void zap_remainder() {
    zap_dead_objects(_last_forwarded_object_end, _hr->top());
  }
};

class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  uint _worker_id;

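  // Cards containing references that need new remembered set entries are
  // collected in this queue for redirtying, rather than the remembered sets
  // being updated directly during the walk.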
  G1RedirtyCardsQueue _rdcq;
  UpdateLogBuffersDeferred _log_buffer_cl;

public:
  RemoveSelfForwardPtrHRClosure(G1RedirtyCardsQueueSet* rdcqs, uint worker_id) :
    _g1h(G1CollectedHeap::heap()),
    _worker_id(worker_id),
    _rdcq(rdcqs),
    _log_buffer_cl(&_rdcq) {
  }

  size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr,
                                               bool during_concurrent_start) {
    RemoveSelfForwardPtrObjClosure rspc(hr,
                                        &_log_buffer_cl,
                                        during_concurrent_start,
                                        _worker_id);
    hr->object_iterate(&rspc);
    // Need to zap the remainder area of the processed region.
    rspc.zap_remainder();

    return rspc.marked_bytes();
  }

  bool do_heap_region(HeapRegion *hr) {
    assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
    assert(hr->in_collection_set(), "bad CS");

    if (hr->evacuation_failed()) {
      hr->clear_index_in_opt_cset();

      bool during_concurrent_start = _g1h->collector_state()->in_concurrent_start_gc();
      bool during_concurrent_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();

      hr->note_self_forwarding_removal_start(during_concurrent_start,
                                             during_concurrent_mark);
      _g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);

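      // The BOT is rebuilt from scratch during the walk below, via the
      // cross_threshold() calls for surviving objects and filler gaps, so
      // clear it first.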
      hr->reset_bot();

      size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_concurrent_start);

      hr->rem_set()->clean_strong_code_roots(hr);
      hr->rem_set()->clear_locked(true);

      hr->note_self_forwarding_removal_end(live_bytes);
    }
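    // Returning false keeps the iteration going so that every collection set
    // region is visited.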
    return false;
  }
};

G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs) :
  AbstractGangTask("G1 Remove Self-forwarding Pointers"),
  _g1h(G1CollectedHeap::heap()),
  _rdcqs(rdcqs),
  _hrclaimer(_g1h->workers()->active_workers()) { }

void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
  RemoveSelfForwardPtrHRClosure rsfp_cl(_rdcqs, worker_id);

  _g1h->collection_set_iterate_increment_from(&rsfp_cl, &_hrclaimer, worker_id);
}