19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
26 #define SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29 #include "gc/g1/g1ConcurrentMark.inline.hpp"
30 #include "gc/g1/g1OopClosures.hpp"
31 #include "gc/g1/g1ParScanThreadState.inline.hpp"
32 #include "gc/g1/g1RemSet.hpp"
33 #include "gc/g1/g1RemSet.inline.hpp"
34 #include "gc/g1/heapRegion.inline.hpp"
35 #include "gc/g1/heapRegionRemSet.hpp"
36 #include "memory/iterator.inline.hpp"
37 #include "runtime/prefetch.inline.hpp"
38
39 // This closure is applied to the fields of the objects that have just been copied.
40 template <class T>
41 inline void G1ParScanClosure::do_oop_nv(T* p) {
42 T heap_oop = oopDesc::load_heap_oop(p);
43
44 if (!oopDesc::is_null(heap_oop)) {
45 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
46 const InCSetState state = _g1->in_cset_state(obj);
47 if (state.is_in_cset()) {
48 // We're not going to even bother checking whether the object is
49 // already forwarded or not, as this usually causes an immediate
50 // stall. We'll try to prefetch the object (for write, given that
51 // we might need to install the forwarding reference) and we'll
52 // get back to it when pop it from the queue
53 Prefetch::write(obj->mark_addr(), 0);
54 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
55
56 // slightly paranoid test; I'm trying to catch potential
57 // problems before we go into push_on_queue to know where the
58 // problem is coming from
59 assert((obj == oopDesc::load_decode_heap_oop(p)) ||
60 (obj->is_forwarded() &&
61 obj->forwardee() == oopDesc::load_decode_heap_oop(p)),
62 "p should still be pointing to obj or to its forwardee");
63
64 _par_scan_state->push_on_queue(p);
65 } else {
66 if (state.is_humongous()) {
67 _g1->set_humongous_is_live(obj);
68 } else if (state.is_ext()) {
69 _par_scan_state->do_oop_ext(p);
70 }
71 _par_scan_state->update_rs(_from, p, obj);
72 }
73 }
74 }
75
76 template <class T>
77 inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
78 T heap_oop = oopDesc::load_heap_oop(p);
79
80 if (!oopDesc::is_null(heap_oop)) {
81 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
82 const InCSetState state = _g1->in_cset_state(obj);
83 if (state.is_in_cset_or_humongous()) {
84 Prefetch::write(obj->mark_addr(), 0);
85 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
86
87 // Place on the references queue
88 _par_scan_state->push_on_queue(p);
89 } else if (state.is_ext()) {
90 _par_scan_state->do_oop_ext(p);
91 } else {
92 assert(!_g1->is_in_cset(obj), "checking");
93 }
94 }
95 }
96
97 template <class T>
98 inline void G1CMOopClosure::do_oop_nv(T* p) {
99 oop obj = oopDesc::load_decode_heap_oop(p);
100 _task->deal_with_reference(obj);
101 }
102
103 template <class T>
104 inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
105 T heap_oop = oopDesc::load_heap_oop(p);
106 if (!oopDesc::is_null(heap_oop)) {
107 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
108 HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
109 _cm->grayRoot(obj, hr);
110 }
111 }
112
113 template <class T>
128 from->humongous_start_region() == g1->heap_region_containing(p)->humongous_start_region()),
129 "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
130 p2i(p), from->hrm_index(), from->humongous_start_region()->hrm_index());
131 #endif // ASSERT
132 }
133
134 template <class T>
135 inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
136 T o = oopDesc::load_heap_oop(p);
137 if (oopDesc::is_null(o)) {
138 return;
139 }
140 oop obj = oopDesc::decode_heap_oop_not_null(o);
141
142 check_obj_during_refinement(p, obj);
143
144 if (HeapRegion::is_in_same_region(p, obj)) {
145 // Normally this closure should only be called with cross-region references.
146 // But since Java threads are manipulating the references concurrently and we
147 // reload the values things may have changed.
148 // This check lets slip through references from a humongous continues region
149 // to its humongous start region, as they are in different regions, and adds a
150 // remembered set entry. This is benign (apart from memory usage), as this
151 // closure is never called during evacuation.
152 return;
153 }
154
155 HeapRegion* to = _g1->heap_region_containing(obj);
156
157 assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
158 to->rem_set()->add_reference(p, _worker_i);
159 }
160
// Either records the reference at p in the remembered set of the region
// containing the referent, or — when recording references into the
// collection set is enabled — pushes it to the per-worker reference queue
// via _push_ref_cl. See the _record_refs_into_cset comment below for when
// each mode applies.
template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
  oop obj = oopDesc::load_decode_heap_oop(p);
  if (obj == NULL) {
    // Nothing to do for NULL references.
    return;
  }

#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");
  assert(check_obj_alignment(obj), "not oop aligned");
  assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT

  // p must lie in _from, or (for humongous objects) in a continuation
  // region of the same humongous object that starts _from's start region.
  assert(_from != NULL, "from region must be non-NULL");
  assert(_from->is_in_reserved(p) ||
         (_from->is_humongous() &&
          _g1->heap_region_containing(p)->is_humongous() &&
          _from->humongous_start_region() == _g1->heap_region_containing(p)->humongous_start_region()),
         "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
         p2i(p), _from->hrm_index(), _from->humongous_start_region()->hrm_index());

  HeapRegion* to = _g1->heap_region_containing(obj);
  if (_from == to) {
    // Normally this closure should only be called with cross-region references.
    // But since Java threads are manipulating the references concurrently and we
    // reload the values things may have changed.
    // Also this check lets slip through references from a humongous continues region
    // to its humongous start region, as they are in different regions, and adds a
    // remembered set entry. This is benign (apart from memory usage), as we never
    // try to either evacuate or eager reclaim these kind of regions.
    return;
  }

  // The _record_refs_into_cset flag is true during the RSet
  // updating part of an evacuation pause. It is false at all
  // other times:
  // * rebuilding the remembered sets after a full GC
  // * during concurrent refinement.
  // * updating the remembered sets of regions in the collection
  //   set in the event of an evacuation failure (when deferred
  //   updates are enabled).

  if (_record_refs_into_cset && to->in_collection_set()) {
    // We are recording references that point into the collection
    // set and this particular reference does exactly that...
    // If the referenced object has already been forwarded
    // to itself, we are handling an evacuation failure and
    // we have already visited/tried to copy this object
    // there is no need to retry.
    if (!self_forwarded(obj)) {
      assert(_push_ref_cl != NULL, "should not be null");
      // Push the reference in the refs queue of the G1ParScanThreadState
      // instance for this worker thread.
      _push_ref_cl->do_oop(p);
    }
    _has_refs_into_cset = true;

    // Deferred updates to the CSet are either discarded (in the normal case),
    // or processed (if an evacuation failure occurs) at the end
    // of the collection.
    // See G1RemSet::cleanup_after_oops_into_collection_set_do().
  } else {
    // We either don't care about pushing references that point into the
    // collection set (i.e. we're not during an evacuation pause) _or_
    // the reference doesn't point into the collection set. Either way
    // we add the reference directly to the RSet of the region containing
    // the referenced object.
    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
    to->rem_set()->add_reference(p, _worker_i);
  }
}
233 void G1UpdateRSOrPushRefOopClosure::do_oop(oop* p) { do_oop_nv(p); }
234 void G1UpdateRSOrPushRefOopClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
235
236 template <class T>
237 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
238 if (_g1->heap_region_containing(new_obj)->is_young()) {
239 _scanned_klass->record_modified_oops();
240 }
241 }
242
// Passes obj to the concurrent marker as a gray root. Must not be called
// for objects in the collection set (asserted below).
// NOTE(review): non-template, non-'inline' definition in an .inline.hpp
// header — confirm this header is included from a single TU, or add
// 'inline' to stay ODR-safe.
void G1ParCopyHelper::mark_object(oop obj) {
  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

  // We know that the object is not moving so it's safe to read its size.
  _cm->grayRoot(obj);
}
249
250 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
251 assert(from_obj->is_forwarded(), "from obj should be forwarded");
252 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
253 assert(from_obj != to_obj, "should not be self-forwarded");
254
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
26 #define SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29 #include "gc/g1/g1ConcurrentMark.inline.hpp"
30 #include "gc/g1/g1OopClosures.hpp"
31 #include "gc/g1/g1ParScanThreadState.inline.hpp"
32 #include "gc/g1/g1RemSet.hpp"
33 #include "gc/g1/g1RemSet.inline.hpp"
34 #include "gc/g1/heapRegion.inline.hpp"
35 #include "gc/g1/heapRegionRemSet.hpp"
36 #include "memory/iterator.inline.hpp"
37 #include "runtime/prefetch.inline.hpp"
38
39 template <class T>
40 inline void G1ParClosureSuper::prefetch_and_push(T* p, const oop obj) {
41 // We're not going to even bother checking whether the object is
42 // already forwarded or not, as this usually causes an immediate
43 // stall. We'll try to prefetch the object (for write, given that
44 // we might need to install the forwarding reference) and we'll
45 // get back to it when pop it from the queue
46 Prefetch::write(obj->mark_addr(), 0);
47 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
48
49 // slightly paranoid test; I'm trying to catch potential
50 // problems before we go into push_on_queue to know where the
51 // problem is coming from
52 assert((obj == oopDesc::load_decode_heap_oop(p)) ||
53 (obj->is_forwarded() &&
54 obj->forwardee() == oopDesc::load_decode_heap_oop(p)),
55 "p should still be pointing to obj or to its forwardee");
56
57 _par_scan_state->push_on_queue(p);
58 }
59
60 template <class T>
61 inline void G1ParClosureSuper::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
62 if (state.is_humongous()) {
63 _g1->set_humongous_is_live(obj);
64 } else if (state.is_ext()) {
65 _par_scan_state->do_oop_ext(p);
66 }
67 }
68
69 template <class T>
70 inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
71 T heap_oop = oopDesc::load_heap_oop(p);
72
73 if (oopDesc::is_null(heap_oop)) {
74 return;
75 }
76 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
77 const InCSetState state = _g1->in_cset_state(obj);
78 if (state.is_in_cset()) {
79 prefetch_and_push(p, obj);
80 } else {
81 handle_non_cset_obj_common(state, p, obj);
82
83 _par_scan_state->update_rs(_from, p, obj);
84 }
85 }
86
87 template <class T>
88 inline void G1CMOopClosure::do_oop_nv(T* p) {
89 oop obj = oopDesc::load_decode_heap_oop(p);
90 _task->deal_with_reference(obj);
91 }
92
93 template <class T>
94 inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
95 T heap_oop = oopDesc::load_heap_oop(p);
96 if (!oopDesc::is_null(heap_oop)) {
97 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
98 HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
99 _cm->grayRoot(obj, hr);
100 }
101 }
102
103 template <class T>
118 from->humongous_start_region() == g1->heap_region_containing(p)->humongous_start_region()),
119 "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
120 p2i(p), from->hrm_index(), from->humongous_start_region()->hrm_index());
121 #endif // ASSERT
122 }
123
// Applied to references found during concurrent refinement: records the
// cross-region reference at p in the remembered set of the region
// containing the referent.
template <class T>
inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (oopDesc::is_null(o)) {
    // NULL references need no remembered set entry.
    return;
  }
  oop obj = oopDesc::decode_heap_oop_not_null(o);

  check_obj_during_refinement(p, obj);

  if (HeapRegion::is_in_same_region(p, obj)) {
    // Normally this closure should only be called with cross-region references.
    // But since Java threads are manipulating the references concurrently and we
    // reload the values things may have changed.
    // Also this check lets slip through references from a humongous continues region
    // to its humongous start region, as they are in different regions, and adds a
    // remembered set entry. This is benign (apart from memory usage), as we never
    // try to either evacuate or eager reclaim humonguous arrays of j.l.O.
    return;
  }

  HeapRegion* to = _g1->heap_region_containing(obj);

  assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
  to->rem_set()->add_reference(p, _worker_i);
}
150
// Applied to references found while updating remembered sets. The source
// card is always outside the collection set (asserted below); references
// into the collection set are pushed for evacuation (recording that fact
// in _has_refs_into_cset), other cross-region references are added to the
// target region's remembered set.
template <class T>
inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (oopDesc::is_null(o)) {
    // NULL references need no processing.
    return;
  }
  oop obj = oopDesc::decode_heap_oop_not_null(o);

  check_obj_during_refinement(p, obj);

  assert(!_g1->is_in_cset((HeapWord*)p), "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.", p2i(p), _g1->addr_to_region((HeapWord*)p));
  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    // Since the source is always from outside the collection set, here we implicitly know
    // that this is a cross-region reference too.
    prefetch_and_push(p, obj);

    _has_refs_into_cset = true;
  } else {
    HeapRegion* to = _g1->heap_region_containing(obj);
    if (_from == to) {
      // Same-region reference: no remembered set entry required.
      return;
    }

    handle_non_cset_obj_common(state, p, obj);

    to->rem_set()->add_reference(p, _worker_i);
  }
}
180
181 template <class T>
182 inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
183 T heap_oop = oopDesc::load_heap_oop(p);
184 if (oopDesc::is_null(heap_oop)) {
185 return;
186 }
187 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
188
189 const InCSetState state = _g1->in_cset_state(obj);
190 if (state.is_in_cset()) {
191 prefetch_and_push(p, obj);
192 } else {
193 handle_non_cset_obj_common(state, p, obj);
194 }
195 }
196
// Klass barrier applied after copying: when the copied object now resides
// in a young region, record on the scanned klass that it holds modified
// oops.
template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
  if (_g1->heap_region_containing(new_obj)->is_young()) {
    _scanned_klass->record_modified_oops();
  }
}
203
// Passes obj to the concurrent marker as a gray root. Must not be called
// for objects in the collection set (asserted below).
// NOTE(review): non-template, non-'inline' definition in an .inline.hpp
// header — confirm this header is included from a single TU, or add
// 'inline' to stay ODR-safe.
void G1ParCopyHelper::mark_object(oop obj) {
  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

  // We know that the object is not moving so it's safe to read its size.
  _cm->grayRoot(obj);
}
210
211 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
212 assert(from_obj->is_forwarded(), "from obj should be forwarded");
213 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
214 assert(from_obj != to_obj, "should not be self-forwarded");
215
|