19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
26 #define SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29 #include "gc/g1/g1ConcurrentMark.inline.hpp"
30 #include "gc/g1/g1OopClosures.hpp"
31 #include "gc/g1/g1ParScanThreadState.inline.hpp"
32 #include "gc/g1/g1RemSet.hpp"
33 #include "gc/g1/g1RemSet.inline.hpp"
34 #include "gc/g1/heapRegion.inline.hpp"
35 #include "gc/g1/heapRegionRemSet.hpp"
36 #include "memory/iterator.inline.hpp"
37 #include "runtime/prefetch.inline.hpp"
38
39 /*
40 * This really ought to be an inline function, but apparently the C++
41 * compiler sometimes sees fit to ignore inline declarations. Sigh.
42 */
43
44 template <class T>
45 inline void FilterIntoCSClosure::do_oop_work(T* p) {
46 T heap_oop = oopDesc::load_heap_oop(p);
47 if (!oopDesc::is_null(heap_oop) &&
48 _g1->is_in_cset_or_humongous(oopDesc::decode_heap_oop_not_null(heap_oop))) {
49 _oc->do_oop(p);
50 }
51 }
52
53 template <class T>
54 inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
55 T heap_oop = oopDesc::load_heap_oop(p);
56 if (!oopDesc::is_null(heap_oop)) {
57 HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
58 if (obj_hw < _r_bottom || obj_hw >= _r_end) {
59 _oc->do_oop(p);
60 }
61 }
62 }
63
64 // This closure is applied to the fields of the objects that have just been copied.
65 template <class T>
66 inline void G1ParScanClosure::do_oop_nv(T* p) {
67 T heap_oop = oopDesc::load_heap_oop(p);
68
69 if (!oopDesc::is_null(heap_oop)) {
70 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
71 const InCSetState state = _g1->in_cset_state(obj);
72 if (state.is_in_cset()) {
73 // We're not going to even bother checking whether the object is
74 // already forwarded or not, as this usually causes an immediate
75 // stall. We'll try to prefetch the object (for write, given that
76 // we might need to install the forwarding reference) and we'll
77 // get back to it when we pop it from the queue
78 Prefetch::write(obj->mark_addr(), 0);
79 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
80
81 // slightly paranoid test; I'm trying to catch potential
82 // problems before we go into push_on_queue to know where the
83 // problem is coming from
119 }
120 }
121
122 template <class T>
123 inline void G1CMOopClosure::do_oop_nv(T* p) {
124 oop obj = oopDesc::load_decode_heap_oop(p);
125 _task->deal_with_reference(obj);
126 }
127
128 template <class T>
129 inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
130 T heap_oop = oopDesc::load_heap_oop(p);
131 if (!oopDesc::is_null(heap_oop)) {
132 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
133 HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
134 _cm->grayRoot(obj, hr);
135 }
136 }
137
138 template <class T>
139 inline void G1Mux2Closure::do_oop_work(T* p) {
140 // Apply first closure; then apply the second.
141 _c1->do_oop(p);
142 _c2->do_oop(p);
143 }
144 void G1Mux2Closure::do_oop(oop* p) { do_oop_work(p); }
145 void G1Mux2Closure::do_oop(narrowOop* p) { do_oop_work(p); }
146
147 template <class T>
148 inline void G1TriggerClosure::do_oop_work(T* p) {
149 // Record that this closure was actually applied (triggered).
150 _triggered = true;
151 }
152 void G1TriggerClosure::do_oop(oop* p) { do_oop_work(p); }
153 void G1TriggerClosure::do_oop(narrowOop* p) { do_oop_work(p); }
154
155 template <class T>
156 inline void G1InvokeIfNotTriggeredClosure::do_oop_work(T* p) {
157 if (!_trigger_cl->triggered()) {
158 _oop_cl->do_oop(p);
159 }
160 }
161 void G1InvokeIfNotTriggeredClosure::do_oop(oop* p) { do_oop_work(p); }
162 void G1InvokeIfNotTriggeredClosure::do_oop(narrowOop* p) { do_oop_work(p); }
163
164 template <class T>
165 inline void G1UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
166 oop obj = oopDesc::load_decode_heap_oop(p);
167 if (obj == NULL) {
168 return;
169 }
170
171 #ifdef ASSERT
172 // can't do because of races
173 // assert(obj == NULL || obj->is_oop(), "expected an oop");
174 assert(check_obj_alignment(obj), "not oop aligned");
175 assert(_g1->is_in_reserved(obj), "must be in heap");
176 #endif // ASSERT
177
178 assert(_from != NULL, "from region must be non-NULL");
179 assert(_from->is_in_reserved(p) ||
180 (_from->is_humongous() &&
181 _g1->heap_region_containing(p)->is_humongous() &&
182 _from->humongous_start_region() == _g1->heap_region_containing(p)->humongous_start_region()),
183 "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
184 p2i(p), _from->hrm_index(), _from->humongous_start_region()->hrm_index());
185
200 // other times:
201 // * rebuilding the remembered sets after a full GC
202 // * during concurrent refinement.
203 // * updating the remembered sets of regions in the collection
204 // set in the event of an evacuation failure (when deferred
205 // updates are enabled).
206
207 if (_record_refs_into_cset && to->in_collection_set()) {
208 // We are recording references that point into the collection
209 // set and this particular reference does exactly that...
210 // If the referenced object has already been forwarded
211 // to itself, we are handling an evacuation failure and
212 // we have already visited/tried to copy this object
213 // there is no need to retry.
214 if (!self_forwarded(obj)) {
215 assert(_push_ref_cl != NULL, "should not be null");
216 // Push the reference in the refs queue of the G1ParScanThreadState
217 // instance for this worker thread.
218 _push_ref_cl->do_oop(p);
219 }
220
221 // Deferred updates to the CSet are either discarded (in the normal case),
222 // or processed (if an evacuation failure occurs) at the end
223 // of the collection.
224 // See G1RemSet::cleanup_after_oops_into_collection_set_do().
225 } else {
226 // We either don't care about pushing references that point into the
227 // collection set (i.e. we're not during an evacuation pause) _or_
228 // the reference doesn't point into the collection set. Either way
229 // we add the reference directly to the RSet of the region containing
230 // the referenced object.
231 assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
232 to->rem_set()->add_reference(p, _worker_i);
233 }
234 }
235 void G1UpdateRSOrPushRefOopClosure::do_oop(oop* p) { do_oop_work(p); }
236 void G1UpdateRSOrPushRefOopClosure::do_oop(narrowOop* p) { do_oop_work(p); }
237
238 template <class T>
239 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
240 if (_g1->heap_region_containing(new_obj)->is_young()) {
241 _scanned_klass->record_modified_oops();
242 }
243 }
244
245 void G1ParCopyHelper::mark_object(oop obj) {
246 assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
247
248 // We know that the object is not moving so it's safe to read its size.
249 _cm->grayRoot(obj);
250 }
251
252 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
253 assert(from_obj->is_forwarded(), "from obj should be forwarded");
254 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
255 assert(from_obj != to_obj, "should not be self-forwarded");
256
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
26 #define SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29 #include "gc/g1/g1ConcurrentMark.inline.hpp"
30 #include "gc/g1/g1OopClosures.hpp"
31 #include "gc/g1/g1ParScanThreadState.inline.hpp"
32 #include "gc/g1/g1RemSet.hpp"
33 #include "gc/g1/g1RemSet.inline.hpp"
34 #include "gc/g1/heapRegion.inline.hpp"
35 #include "gc/g1/heapRegionRemSet.hpp"
36 #include "memory/iterator.inline.hpp"
37 #include "runtime/prefetch.inline.hpp"
38
39 // This closure is applied to the fields of the objects that have just been copied.
40 template <class T>
41 inline void G1ParScanClosure::do_oop_nv(T* p) {
42 T heap_oop = oopDesc::load_heap_oop(p);
43
44 if (!oopDesc::is_null(heap_oop)) {
45 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
46 const InCSetState state = _g1->in_cset_state(obj);
47 if (state.is_in_cset()) {
48 // We're not going to even bother checking whether the object is
49 // already forwarded or not, as this usually causes an immediate
50 // stall. We'll try to prefetch the object (for write, given that
51 // we might need to install the forwarding reference) and we'll
52 // get back to it when we pop it from the queue
53 Prefetch::write(obj->mark_addr(), 0);
54 Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
55
56 // slightly paranoid test; I'm trying to catch potential
57 // problems before we go into push_on_queue to know where the
58 // problem is coming from
94 }
95 }
96
97 template <class T>
98 inline void G1CMOopClosure::do_oop_nv(T* p) {
99 oop obj = oopDesc::load_decode_heap_oop(p);
100 _task->deal_with_reference(obj);
101 }
102
103 template <class T>
104 inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
105 T heap_oop = oopDesc::load_heap_oop(p);
106 if (!oopDesc::is_null(heap_oop)) {
107 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
108 HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
109 _cm->grayRoot(obj, hr);
110 }
111 }
112
113 template <class T>
114 inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
115 oop obj = oopDesc::load_decode_heap_oop(p);
116 if (obj == NULL) {
117 return;
118 }
119
120 #ifdef ASSERT
121 // can't do because of races
122 // assert(obj == NULL || obj->is_oop(), "expected an oop");
123 assert(check_obj_alignment(obj), "not oop aligned");
124 assert(_g1->is_in_reserved(obj), "must be in heap");
125 #endif // ASSERT
126
127 assert(_from != NULL, "from region must be non-NULL");
128 assert(_from->is_in_reserved(p) ||
129 (_from->is_humongous() &&
130 _g1->heap_region_containing(p)->is_humongous() &&
131 _from->humongous_start_region() == _g1->heap_region_containing(p)->humongous_start_region()),
132 "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
133 p2i(p), _from->hrm_index(), _from->humongous_start_region()->hrm_index());
134
149 // other times:
150 // * rebuilding the remembered sets after a full GC
151 // * during concurrent refinement.
152 // * updating the remembered sets of regions in the collection
153 // set in the event of an evacuation failure (when deferred
154 // updates are enabled).
155
156 if (_record_refs_into_cset && to->in_collection_set()) {
157 // We are recording references that point into the collection
158 // set and this particular reference does exactly that...
159 // If the referenced object has already been forwarded
160 // to itself, we are handling an evacuation failure and
161 // we have already visited/tried to copy this object
162 // there is no need to retry.
163 if (!self_forwarded(obj)) {
164 assert(_push_ref_cl != NULL, "should not be null");
165 // Push the reference in the refs queue of the G1ParScanThreadState
166 // instance for this worker thread.
167 _push_ref_cl->do_oop(p);
168 }
169 _has_refs_into_cset = true;
170
171 // Deferred updates to the CSet are either discarded (in the normal case),
172 // or processed (if an evacuation failure occurs) at the end
173 // of the collection.
174 // See G1RemSet::cleanup_after_oops_into_collection_set_do().
175 } else {
176 // We either don't care about pushing references that point into the
177 // collection set (i.e. we're not during an evacuation pause) _or_
178 // the reference doesn't point into the collection set. Either way
179 // we add the reference directly to the RSet of the region containing
180 // the referenced object.
181 assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
182 to->rem_set()->add_reference(p, _worker_i);
183 }
184 }
185 void G1UpdateRSOrPushRefOopClosure::do_oop(oop* p) { do_oop_nv(p); }
186 void G1UpdateRSOrPushRefOopClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
187
188 template <class T>
189 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
190 if (_g1->heap_region_containing(new_obj)->is_young()) {
191 _scanned_klass->record_modified_oops();
192 }
193 }
194
195 void G1ParCopyHelper::mark_object(oop obj) {
196 assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
197
198 // We know that the object is not moving so it's safe to read its size.
199 _cm->grayRoot(obj);
200 }
201
202 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
203 assert(from_obj->is_forwarded(), "from obj should be forwarded");
204 assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
205 assert(from_obj != to_obj, "should not be self-forwarded");
206
|