  // stall. We'll try to prefetch the object (for write, given that
  // we might need to install the forwarding reference) and we'll
  // get back to it when we pop it from the queue.
  Prefetch::write(obj->mark_addr(), 0);
  Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

  // Slightly paranoid test: try to catch potential problems before we go
  // into push_on_queue, to know where the problem is coming from.
  assert((obj == RawAccess<>::oop_load(p)) ||
         (obj->is_forwarded() &&
          obj->forwardee() == RawAccess<>::oop_load(p)),
         "p should still be pointing to obj or to its forwardee");

  _par_scan_state->push_on_queue(p);
}

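// Common handling for references to objects outside the collection set: live
// humongous objects are recorded so that they are not eagerly reclaimed.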
template <class T>
inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
  if (state.is_humongous()) {
    _g1->set_humongous_is_live(obj);
  }
}

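// Applied to the fields of an object that has just been evacuated: references
// into the collection set are queued for later processing, everything else
// only needs its remembered set updated.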
template <class T>
inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else {
    if (HeapRegion::is_in_same_region(p, obj)) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
    _par_scan_state->update_rs(_from, p, obj);
  }
}

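// Concurrent marking: hand the discovered reference over to the marking task.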
template <class T>
inline void G1CMOopClosure::do_oop_nv(T* p) {
  _task->deal_with_reference(p);
}

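// Root region scanning: objects referenced from a root region must be marked
// in the next bitmap before concurrent marking proper proceeds.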
template <class T>
inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<MO_VOLATILE>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);
  _cm->mark_in_next_bitmap(_worker_id, obj);
}

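// Debug-only sanity checks for a reference encountered during concurrent
// refinement; compiles to nothing in product builds.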
template <class T>
inline static void check_obj_during_refinement(T* p, oop const obj) {
#ifdef ASSERT
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  // Can't assert that obj is a valid oop: concurrent mutators race with us.
  // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
  assert(check_obj_alignment(obj), "not oop aligned");
  assert(g1->is_in_reserved(obj), "must be in heap");

  HeapRegion* from = g1->heap_region_containing(p);

  assert(from != NULL, "from region must be non-NULL");
  assert(from->is_in_reserved(p) ||
         (from->is_humongous() &&
          g1->heap_region_containing(p)->is_humongous() &&
          from->humongous_start_region() == g1->heap_region_containing(p)->humongous_start_region()),
         "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
         p2i(p), from->hrm_index(), from->humongous_start_region()->hrm_index());
#endif // ASSERT
}

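// Concurrent refinement: record the cross-region reference *p in the
// remembered set of the region containing obj, if that region tracks one.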
template <class T>
inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
  T o = RawAccess<MO_VOLATILE>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  if (HeapRegion::is_in_same_region(p, obj)) {
    // Normally this closure should only be called with cross-region references.
    // But since Java threads are manipulating the references concurrently and
    // we reload the values, things may have changed.
    // This check also lets references from a humongous continues region to its
    // humongous start region slip through; adding those to the remembered set
    // is redundant but harmless.
    return;
  }

  HeapRegionRemSet* to_rem_set = _g1->heap_region_containing(obj)->rem_set();

  assert(to_rem_set != NULL, "Need per-region 'into' remsets.");
  if (to_rem_set->is_tracked()) {
    to_rem_set->add_reference(p, _worker_i);
  }
}

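// Processes references found while updating remembered sets during a pause;
// the source oop is always outside the collection set.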
template <class T>
inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(o)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(o);

  check_obj_during_refinement(p, obj);

  assert(!_g1->is_in_cset((HeapWord*)p),
         "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.",
         p2i(p), _g1->addr_to_region((HeapWord*)p));
  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    // Since the source is always from outside the collection set, here we implicitly know
    // that this is a cross-region reference too.
    prefetch_and_push(p, obj);
  } else {
    HeapRegion* to = _g1->heap_region_containing(obj);
    if (_from == to) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
    to->rem_set()->add_reference(p, _worker_i);
  }
}

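// Processes references found while scanning remembered sets during a pause;
// only references into the collection set need further work here.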
template <class T>
inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);
  if (CompressedOops::is_null(heap_oop)) {
    return;
  }
  oop obj = CompressedOops::decode_not_null(heap_oop);

  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    prefetch_and_push(p, obj);
  } else {
    if (HeapRegion::is_in_same_region(p, obj)) {
      return;
    }
    handle_non_cset_obj_common(state, p, obj);
  }
}

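// CLD barrier: if the evacuated object ended up in the young generation, mark
// the scanned ClassLoaderData as having modified oops so it is revisited.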
void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
  if (_g1->heap_region_containing(new_obj)->is_young()) {
    _scanned_cld->record_modified_oops();
  }
}

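// Mark a non-moving object (outside the collection set) in the next bitmap.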
void G1ParCopyHelper::mark_object(oop obj) {
  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

  // We know that the object is not moving so it's safe to read its size.
  _cm->mark_in_next_bitmap(_worker_id, obj);
}

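// Mark the to-space copy of an evacuated object in the next bitmap.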
void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  assert(from_obj->is_forwarded(), "from obj should be forwarded");
  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  assert(from_obj != to_obj, "should not be self-forwarded");

  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

  // The object might be in the process of being copied by another
  // worker so we cannot trust that its to-space image is
  // well-formed. So we have to read its size from its from-space
  // image which we know should not be changing.
  _cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
}

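// The main evacuation closure: copies a referenced collection-set object to a
// survivor/old region (or uses the copy another worker already made, found via
// the forwarding pointer in the mark word) and updates the scanned reference.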
template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  T heap_oop = RawAccess<>::oop_load(p);

  if (CompressedOops::is_null(heap_oop)) {
    return;
  }

  oop obj = CompressedOops::decode_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->worker_id(), "sanity");

  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    oop forwardee;
    markOop m = obj->mark();
    if (m->is_marked()) {
      forwardee = (oop) m->decode_pointer();
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    RawAccess<>::oop_store(p, forwardee);
    if (do_mark_object != G1MarkNone && forwardee != obj) {
      // If the object is self-forwarded we don't need to explicitly
      // mark it; the evacuation failure protocol will do so.
      mark_forwarded_object(obj, forwardee);
    }

    if (barrier == G1BarrierCLD) {
      do_cld_barrier(forwardee);
    }
  } else {
    if (state.is_humongous()) {
      _g1->set_humongous_is_live(obj);
    }

    // The object is not in the collection set. If we're a root scanning
    // closure during an initial mark pause then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
      mark_object(obj);
    }
  }
}

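// Remembered set rebuild (after concurrent marking): re-add every cross-region
// reference to the remembered set of the region containing the target object.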
template <class T>
void G1RebuildRemSetClosure::do_oop_nv(T* p) {
  oop const obj = RawAccess<MO_VOLATILE>::oop_load(p);
  if (obj == NULL) {
    return;
  }

  if (HeapRegion::is_in_same_region(p, obj)) {
    return;
  }

  HeapRegion* to = _g1->heap_region_containing(obj);
  HeapRegionRemSet* rem_set = to->rem_set();
  rem_set->add_reference(p, _worker_id);
}

#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP