1 /*
2 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
23
24 #ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
25 #define SHARE_GC_Z_ZBARRIER_INLINE_HPP
26
27 #include "classfile/javaClasses.hpp"
28 #include "gc/z/zAddress.inline.hpp"
29 #include "gc/z/zBarrier.hpp"
30 #include "gc/z/zOop.inline.hpp"
31 #include "gc/z/zResurrection.inline.hpp"
32 #include "oops/oop.hpp"
33 #include "runtime/atomic.hpp"
34
35 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
36 inline oop ZBarrier::barrier(volatile oop* p, oop o) {
37 uintptr_t addr = ZOop::to_address(o);
38
39 retry:
40 // Fast path
41 if (fast_path(addr)) {
42 return ZOop::from_address(addr);
43 }
44
45 // Slow path
46 const uintptr_t good_addr = slow_path(addr);
47
48 // Self heal, but only if the address was actually updated by the slow path,
49 // which might not be the case, e.g. when marking through an already good oop.
50 if (p != NULL && good_addr != addr) {
51 const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
52 if (prev_addr != addr) {
53 // Some other thread overwrote the oop. If this oop was updated by a
54 // weak barrier the new oop might not be good, in which case we need
55 // to re-apply this barrier.
56 addr = prev_addr;
57 goto retry;
58 }
59 }
60
61 return ZOop::from_address(good_addr);
62 }
63
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
// Weak variant of the load barrier. Returns the good (or null) view of the
// oop, but self heals the field with the remapped (weak good) address, since
// oops are never marked in a weak load barrier (see comments below).
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  // Slow path
  uintptr_t good_addr = slow_path(addr);

  // Self heal unless the address returned from the slow path is null,
  // in which case resurrection was blocked and we must let the reference
  // processor clear the oop. Mutators are not allowed to clear oops in
  // these cases, since that would be similar to calling Reference.clear(),
  // which would make the reference non-discoverable or silently dropped
  // by the reference processor.
  if (p != NULL && good_addr != 0) {
    // The slow path returns a good/marked address, but we never mark oops
    // in a weak load barrier so we always self heal with the remapped address.
    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
    const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. The new
      // oop is guaranteed to be weak good or null.
      assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");

      // Return the good address instead of the weak good address
      // to ensure that the currently active heap view is used.
      good_addr = ZAddress::good_or_null(prev_addr);
    }
  }

  return ZOop::from_address(good_addr);
}
102
103 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
104 inline void ZBarrier::root_barrier(oop* p, oop o) {
105 const uintptr_t addr = ZOop::to_address(o);
106
107 // Fast path
108 if (fast_path(addr)) {
109 return;
110 }
111
112 // Slow path
113 const uintptr_t good_addr = slow_path(addr);
114
115 // Non-atomic healing helps speed up root scanning. This is safe to do
116 // since we are always healing roots in a safepoint, or under a lock,
117 // which ensures we are never racing with mutators modifying roots while
118 // we are healing them. It's also safe in case multiple GC threads try
119 // to heal the same root if it is aligned, since they would always heal
120 // the root in the same way and it does not matter in which order it
117 // which ensures we are never racing with mutators modifying roots while
118 // we are healing them. It's also safe in case multiple GC threads try
119 // to heal the same root if it is aligned, since they would always heal
120 // the root in the same way and it does not matter in which order it
121 // happens. For misaligned oops, there needs to be mutual exclusion.
122 *p = ZOop::from_address(good_addr);
123 }
124
// Fast path predicate: the oop is null.
inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
  return ZAddress::is_null(addr);
}
128
// Fast path predicate: the oop is good (current heap view) or null.
inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}
132
// Fast path predicate: the oop is weak good or null.
inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}
136
// Returns whether resurrection is currently blocked. If p is non-null, *o is
// reloaded from p AFTER the blocked state has been read — the ordering is
// essential, see the comment below.
inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
  const bool is_blocked = ZResurrection::is_blocked();

  // Reload oop after checking the resurrection blocked state. This is
  // done to prevent a race where we first load an oop, which is logically
  // null but not yet cleared, then this oop is cleared by the reference
  // processor and resurrection is unblocked. At this point the mutator
  // would see the unblocked state and pass this invalid oop through the
  // normal barrier path, which would incorrectly try to mark this oop.
  if (p != NULL) {
    // First assign to reloaded_o to avoid compiler warning about
    // implicit dereference of volatile oop.
    const oop reloaded_o = *p;
    *o = reloaded_o;
  }

  return is_blocked;
}
155
156 //
157 // Load barrier
158 //
// Load barrier for an oop not loaded from a field (p == NULL, so no self healing).
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}
162
// Loads the oop from the field at p and applies the load barrier to it.
inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
  const oop o = *p;
  return load_barrier_on_oop_field_preloaded(p, o);
}
167
// Load barrier for an oop already loaded from the field at p.
inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}
171
172 inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
173 for (volatile const oop* const end = p + length; p < end; p++) {
174 load_barrier_on_oop_field(p);
175 }
173 for (volatile const oop* const end = p + length; p < end; p++) {
174 load_barrier_on_oop_field(p);
175 }
176 }
177
// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
inline void verify_on_weak(volatile oop* referent_addr) {
#ifdef ASSERT
  if (referent_addr != NULL) {
    // Recover the Reference object from the address of its referent field.
    uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset;
    oop obj = cast_to_oop(base);
    assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
    assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset), "Sanity");
  }
#endif
}
189
// ON_WEAK load barrier. While resurrection is blocked (o is then reloaded
// from p, see is_resurrection_blocked), the weak barrier is taken, which
// does not clear the field when the slow path returns null (see weak_barrier).
inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (is_resurrection_blocked(p, &o)) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}
199
// ON_PHANTOM load barrier. Same structure as the ON_WEAK variant, but uses
// the phantom slow path and needs no referent verification.
inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (is_resurrection_blocked(p, &o)) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}
207
// Load barrier for a root oop field; heals non-atomically (see root_barrier).
inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}
212
213 //
214 // Weak load barrier
215 //
216 inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
217 assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
218 const oop o = *p;
219 return weak_load_barrier_on_oop_field_preloaded(p, o);
220 }
221
218 const oop o = *p;
219 return weak_load_barrier_on_oop_field_preloaded(p, o);
220 }
221
// Weak load barrier for a preloaded oop; weak good oops pass the fast path
// and the object is never marked (see weak_barrier).
inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}
225
// ON_WEAK weak load barrier for an oop not loaded from a field (p == NULL).
inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}
229
// ON_WEAK weak load barrier: loads the oop from the field and applies the barrier.
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}
234
// ON_WEAK weak load barrier. While resurrection is blocked (o is reloaded
// from p, see is_resurrection_blocked) the weak barrier with the ON_WEAK
// slow path is used; otherwise the plain weak load barrier applies.
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (is_resurrection_blocked(p, &o)) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}
244
// ON_PHANTOM weak load barrier for an oop not loaded from a field (p == NULL).
inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}
248
// ON_PHANTOM weak load barrier: loads the oop from the field and applies the barrier.
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}
253
// ON_PHANTOM weak load barrier; same structure as the ON_WEAK variant but
// with the phantom slow path.
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (is_resurrection_blocked(p, &o)) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}
261
262 //
263 // Is alive barrier
264 //
// Returns true if the weakly reachable object is still alive, i.e. the weak
// load barrier yields a logically non-null oop.
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_weak_oop(o) != NULL;
}
271
272 inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
273 // Check if oop is logically non-null. This operation
274 // is only valid when resurrection is blocked.
275 assert(ZResurrection::is_blocked(), "Invalid phase");
|
1 /*
2 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
23
24 #ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
25 #define SHARE_GC_Z_ZBARRIER_INLINE_HPP
26
27 #include "classfile/javaClasses.hpp"
28 #include "gc/z/zAddress.inline.hpp"
29 #include "gc/z/zBarrier.hpp"
30 #include "gc/z/zOop.inline.hpp"
31 #include "gc/z/zResurrection.inline.hpp"
32 #include "oops/oop.hpp"
33 #include "runtime/atomic.hpp"
34
35 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
36 inline oop ZBarrier::barrier(volatile oop* p, oop o) {
37 uintptr_t addr = ZOop::to_address(o);
38
39 if (fast_path(addr)) {
40 return ZOop::from_address(addr);
41 }
42
43 uintptr_t good_addr = slow_path(addr);
44 const oop result = ZOop::from_address(good_addr);
45
46 // Self heal, but only if the address was actually updated by the slow path,
47 // which might not be the case, e.g. when marking through an already good oop.
48 while (p != NULL && good_addr != addr) {
49 const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
50 if (prev_addr != addr) {
51 // Some other thread overwrote the oop. If this oop was updated by a
52 // weak barrier the new oop might not be good, in which case we need
53 // to re-apply this barrier.
54 addr = prev_addr;
55 // Fast path
56 if (fast_path(addr)) {
57 break;
58 }
59
60 // Slow path
61 good_addr = slow_path(addr);
62 }
63 }
64
65 return result;
66 }
67
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
// Weak variant of the load barrier. Returns the good (or null) view of the
// oop, but self heals the field with the remapped (weak good) address, since
// oops are never marked in a weak load barrier (see comments below). The
// cmpxchg result is deliberately ignored; the value from our own slow path
// invocation is returned regardless of concurrent updates to the field.
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  const uintptr_t good_addr = slow_path(addr);
  const oop result = ZOop::from_address(good_addr);

  // Self heal unless the address returned from the slow path is null,
  // in which case resurrection was blocked and we must let the reference
  // processor clear the oop. Mutators are not allowed to clear oops in
  // these cases, since that would be similar to calling Reference.clear(),
  // which would make the reference non-discoverable or silently dropped
  // by the reference processor.
  if (p != NULL && good_addr != 0) {
    // The slow path returns a good/marked address, but we never mark oops
    // in a weak load barrier so we always self heal with the remapped address.
    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
    Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
  }

  return result;
}
96
97 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
98 inline void ZBarrier::root_barrier(oop* p, oop o) {
99 const uintptr_t addr = ZOop::to_address(o);
100
101 // Fast path
102 if (fast_path(addr)) {
103 return;
104 }
105
106 // Slow path
107 const uintptr_t good_addr = slow_path(addr);
108
109 // Non-atomic healing helps speed up root scanning. This is safe to do
110 // since we are always healing roots in a safepoint, or under a lock,
111 // which ensures we are never racing with mutators modifying roots while
112 // we are healing them. It's also safe in case multiple GC threads try
113 // to heal the same root if it is aligned, since they would always heal
114 // the root in the same way and it does not matter in which order it
111 // which ensures we are never racing with mutators modifying roots while
112 // we are healing them. It's also safe in case multiple GC threads try
113 // to heal the same root if it is aligned, since they would always heal
114 // the root in the same way and it does not matter in which order it
115 // happens. For misaligned oops, there needs to be mutual exclusion.
116 *p = ZOop::from_address(good_addr);
117 }
118
// Fast path predicate: the oop is null.
inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
  return ZAddress::is_null(addr);
}
122
// Fast path predicate: the oop is good (current heap view) or null.
inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}
126
// Fast path predicate: the oop is weak good or null.
inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}
130
131 //
132 // Load barrier
133 //
// Load barrier for an oop not loaded from a field (p == NULL, so no self healing).
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}
137
// Loads the oop from the field at p and applies the load barrier to it.
inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
  const oop o = *p;
  return load_barrier_on_oop_field_preloaded(p, o);
}
142
// Load barrier for an oop already loaded from the field at p.
inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}
146
147 inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
148 for (volatile const oop* const end = p + length; p < end; p++) {
149 load_barrier_on_oop_field(p);
150 }
148 for (volatile const oop* const end = p + length; p < end; p++) {
149 load_barrier_on_oop_field(p);
150 }
151 }
152
// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
inline void verify_on_weak(volatile oop* referent_addr) {
#ifdef ASSERT
  if (referent_addr != NULL) {
    // Recover the Reference object from the address of its referent field.
    uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset;
    oop obj = cast_to_oop(base);
    assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
    assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset), "Sanity");
  }
#endif
}
164
// ON_WEAK load barrier. While resurrection is blocked the weak barrier is
// taken, which does not clear the field when the slow path returns null
// (see weak_barrier).
inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}
174
// ON_PHANTOM load barrier. Same structure as the ON_WEAK variant, but uses
// the phantom slow path and needs no referent verification.
inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}
182
// Load barrier for a root oop field; heals non-atomically (see root_barrier).
inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}
187
188 //
189 // Weak load barrier
190 //
191 inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
192 assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
193 const oop o = *p;
194 return weak_load_barrier_on_oop_field_preloaded(p, o);
195 }
196
193 const oop o = *p;
194 return weak_load_barrier_on_oop_field_preloaded(p, o);
195 }
196
// Weak load barrier for a preloaded oop; weak good oops pass the fast path
// and the object is never marked (see weak_barrier).
inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}
200
// ON_WEAK weak load barrier for an oop not loaded from a field (p == NULL).
inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}
204
// ON_WEAK weak load barrier: loads the oop from the field and applies the barrier.
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}
209
// ON_WEAK weak load barrier. While resurrection is blocked the weak barrier
// with the ON_WEAK slow path is used; otherwise the plain weak load barrier
// applies.
inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}
219
// ON_PHANTOM weak load barrier for an oop not loaded from a field (p == NULL).
inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}
223
// ON_PHANTOM weak load barrier: loads the oop from the field and applies the barrier.
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}
228
// ON_PHANTOM weak load barrier; same structure as the ON_WEAK variant but
// with the phantom slow path.
inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}
236
237 //
238 // Is alive barrier
239 //
// Returns true if the weakly reachable object is still alive, i.e. the weak
// load barrier yields a logically non-null oop.
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_weak_oop(o) != NULL;
}
246
247 inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
248 // Check if oop is logically non-null. This operation
249 // is only valid when resurrection is blocked.
250 assert(ZResurrection::is_blocked(), "Invalid phase");
|