/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
#define SHARE_GC_Z_ZBARRIER_INLINE_HPP

#include "classfile/javaClasses.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"

// Generic strong barrier. fast_path is a predicate on the colored oop address;
// if it accepts the address the oop is returned as-is. Otherwise slow_path is
// invoked to produce a good (healed) address, which is written back ("self
// healed") into the field *p so later loads take the fast path. p may be NULL
// when there is no field to heal (e.g. barriers applied to a bare oop value).
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return ZOop::from_address(addr);
  }

  // Slow path. Note that the oop returned to the caller is based on the
  // first good address, even if the healing loop below re-applies the
  // barrier after losing a CAS race.
  uintptr_t good_addr = slow_path(addr);
  const oop result = ZOop::from_address(good_addr);

  // Self heal, but only if the address was actually updated by the slow path,
  // which might not be the case, e.g. when marking through an already good oop.
  while (p != NULL && good_addr != addr) {
    const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. If this oop was updated by a
      // weak barrier the new oop might not be good, in which case we need
      // to re-apply this barrier.
      addr = prev_addr;
      // Fast path
      if (fast_path(addr)) {
        break;
      }

      // Slow path
      good_addr = slow_path(addr);
    }
  }

  return result;
}

// Generic weak barrier. Like barrier(), but self heals with the remapped
// (weak good) address rather than the marked address, and never heals with
// null (see comment below). Used when resurrection is blocked, and for weak
// loads that must not keep the referent alive.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);
  const oop result = ZOop::from_address(good_addr);

  // Self heal unless the address returned from the slow path is null,
  // in which case resurrection was blocked and we must let the reference
  // processor clear the oop. Mutators are not allowed to clear oops in
  // these cases, since that would be similar to calling Reference.clear(),
  // which would make the reference non-discoverable or silently dropped
  // by the reference processor.
  if (p != NULL && good_addr != 0) {
    // The slow path returns a good/marked address, but we never mark oops
    // in a weak load barrier so we always self heal with the remapped address.
    // No retry on CAS failure: if another thread overwrote the field, its
    // value wins and this barrier's healing is simply skipped.
    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
    Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
  }

  return result;
}

// Generic root barrier. Roots are healed with a plain (non-atomic) store;
// see the comment below for why that is safe.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return;
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Non-atomic healing helps speed up root scanning. This is safe to do
  // since we are always healing roots in a safepoint, or under a lock,
  // which ensures we are never racing with mutators modifying roots while
  // we are healing them. It's also safe in case multiple GC threads try
  // to heal the same root if it is aligned, since they would always heal
  // the root in the same way and it does not matter in which order it
  // happens. For misaligned oops, there needs to be mutual exclusion.
  *p = ZOop::from_address(good_addr);
}

// Fast-path predicates, passed as the fast_path template argument above.

// Accepts only null (used by mark barriers, which mark through good oops).
inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
  return ZAddress::is_null(addr);
}

// Accepts null or a good (current heap view) address.
inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}

// Accepts null or a weak good (remapped) address.
inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}

//
// Load barrier
//
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  // Bare oop value, no field to self heal.
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
  const oop o = *p;
  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

// Applies the load barrier to each element of an oop array.
inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    load_barrier_on_oop_field(p);
  }
}

// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
inline void verify_on_weak(volatile oop* referent_addr) {
#ifdef ASSERT
  if (referent_addr != NULL) {
    // Recover the Reference object from the referent field address and check
    // that the field really is the referent of a j.l.r.Reference.
    uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset;
    oop obj = cast_to_oop(base);
    assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
    assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset), "Sanity");
  }
#endif
}

inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    // Resurrection blocked: use the weak barrier so a dead referent is
    // returned as null and the field is never healed with null.
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

//
// Weak load barrier
//
inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
  assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
  const oop o = *p;
  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

//
// Is alive barrier
//
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_weak_oop(o) != NULL;
}

inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_phantom_oop(o) != NULL;
}

//
// Keep alive barrier
//
// These use the strong barrier() so the slow path marks the object,
// keeping it alive even though it is only weakly/phantom reachable.
inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

//
// Mark barrier
//
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
  // The fast path only checks for null since the GC worker
  // threads doing marking wants to mark through good oops.
  const oop o = *p;

  if (finalizable) {
    barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
  } else {
    barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
  }
}

// Applies the mark barrier to each element of an oop array.
inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    mark_barrier_on_oop_field(p, finalizable);
  }
}

inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
}

inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o);
}

//
// Relocate barrier
//
inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
}

#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP