/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
#define SHARE_GC_Z_ZBARRIER_INLINE_HPP

#include "classfile/javaClasses.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"

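// A note on the scheme these barriers implement: ZGC stores metadata bits
// (e.g. marked, remapped, finalizable) in otherwise unused bits of each
// 64-bit oop, so a pointer's bit pattern encodes how up-to-date it is with
// respect to the current GC phase. A barrier first tests those bits (the
// fast path); if the pointer is not in the expected state it computes the
// current "good" address (the slow path) and self-heals the location so
// later accesses take the fast path. This is only a conceptual summary;
// the authoritative encoding lives in zAddress and zGlobals.
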
inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
  if (heal_addr == 0) {
    // Never heal with null since it interacts badly with reference processing.
    // A mutator clearing an oop would be similar to calling Reference.clear(),
    // which would make the reference either non-discoverable by, or silently
    // dropped by, the reference processor.
    return;
  }

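  // Retry until this heal, or a stronger one, has been installed. If another
  // thread heals the location first with an address that is already good or
  // null we are done; otherwise we adopt the value it installed as the new
  // expected value and retry, so the location is never left with weaker
  // metadata bits than heal_addr carries.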
  for (;;) {
    if (addr == heal_addr) {
      // Already healed
      return;
    }

    // Heal by atomically installing heal_addr, expecting the location to
    // still contain addr (cmpxchg returns the value found in memory)
    const uintptr_t prev_addr = Atomic::cmpxchg(heal_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr == addr) {
      // Success
      return;
    }

    if (ZAddress::is_good_or_null(prev_addr)) {
      // No need to heal
      return;
    }

    // The oop location was healed by another barrier, but it is still not
    // good or null. Re-apply healing to make sure the oop is not left with
    // weaker (remapped or finalizable) metadata bits than what this barrier
    // tried to apply.
    assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
    addr = prev_addr;
  }
}

template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return ZOop::from_address(addr);
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    self_heal(p, addr, good_addr);
  }

  return ZOop::from_address(good_addr);
}
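
// As an illustration, the strong load barrier below instantiates this
// template as
//
//   barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o)
//
// i.e. a pointer that is already good (or null) is returned as-is, while a
// stale pointer is remapped/marked as needed by the slow path and the
// location healed.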

template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    // The slow path returns a good/marked address or null, but we never mark
    // oops in a weak load barrier so we always heal with the remapped address.
    self_heal(p, addr, ZAddress::remapped_or_null(good_addr));
  }

  return ZOop::from_address(good_addr);
}
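
// The intent of healing with the remapped address (rather than the returned
// good address) is, as noted above, that the location stays only weak good;
// a later strong load barrier on the same field will therefore still take
// its slow path and can mark the object before handing it to a mutator.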

template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return;
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Non-atomic healing helps speed up root scanning. This is safe to do
  // since we are always healing roots in a safepoint, or under a lock,
  // which ensures we are never racing with mutators modifying roots while
  // we are healing them. It's also safe in case multiple GC threads try
  // to heal the same root if it is aligned, since they would always heal
  // the root in the same way and it does not matter in which order it
  // happens. For misaligned oops, there needs to be mutual exclusion.
  *p = ZOop::from_address(good_addr);
}

inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
  return ZAddress::is_null(addr);
}

inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}

inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}
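
// How these fast paths are used below: is_good_or_null_fast_path guards the
// strong load, keep-alive and root barriers, is_weak_good_or_null_fast_path
// guards the weak load barriers, and is_null_fast_path guards the mark
// barrier on heap fields, where GC workers want to mark through even
// already-good oops.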

//
// Load barrier
//
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
  const oop o = *p;
  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    load_barrier_on_oop_field(p);
  }
}
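
// Hypothetical call site, for illustration only: a runtime path loading an
// oop field would do
//
//   volatile oop* field_addr = ...;  // address of the field being loaded
//   oop obj = ZBarrier::load_barrier_on_oop_field(field_addr);
//
// and receive a good oop, with *field_addr healed as a side effect.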

// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
inline void verify_on_weak(volatile oop* referent_addr) {
#ifdef ASSERT
  if (referent_addr != NULL) {
    // Recover the Reference object from the address of its referent field
    uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset;
    oop obj = cast_to_oop(base);
    assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
    assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset), "Sanity");
  }
#endif
}

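// While resurrection is blocked (i.e. while reference processing is in
// progress), a load through a weak or phantom field must not make an
// otherwise-dead referent reachable again. The barriers below therefore
// switch to dedicated slow paths in that phase; conceptually, those paths
// return null for referents that are no longer strongly reachable (the
// implementations live in zBarrier.cpp).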
inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

//
// Weak load barrier
//
inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
  assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
  const oop o = *p;
  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

//
// Is alive barrier
//
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_weak_oop(o) != NULL;
}

inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_phantom_oop(o) != NULL;
}
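
// Both is-alive barriers pass a NULL healing pointer (via the field-less
// weak_load_barrier_on_*_oop overloads above), so these are pure liveness
// queries with no side effect on any oop location.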

//
// Keep alive barrier
//
inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}
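
// Unlike the is-alive barriers, the keep-alive barriers do heal the location,
// and conceptually their slow paths also mark the object, so that an object
// handed out through a weak or phantom reference while resurrection is
// blocked stays alive.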

//
// Mark barrier
//
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
  // The fast path only checks for null since the GC worker
  // threads doing marking want to mark through good oops.
  const oop o = *p;

  if (finalizable) {
    barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
  } else {
    barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
  }
}
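
// Note on the finalizable flag: it selects finalizable marking, used when
// marking through a FinalReference, i.e. for objects that may only be
// reachable through a finalizer. Such oops are healed with the weaker
// "finalizable" metadata bits (cf. the comment in self_heal) rather than
// being strongly marked.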

inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    mark_barrier_on_oop_field(p, finalizable);
  }
}

inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
}

inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o);
}

//
// Relocate barrier
//
inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
}

#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP