1 /* 2 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 */ 23 24 #ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP 25 #define SHARE_GC_Z_ZBARRIER_INLINE_HPP 26 27 #include "classfile/javaClasses.hpp" 28 #include "gc/z/zAddress.inline.hpp" 29 #include "gc/z/zBarrier.hpp" 30 #include "gc/z/zOop.inline.hpp" 31 #include "gc/z/zResurrection.inline.hpp" 32 #include "oops/oop.hpp" 33 #include "runtime/atomic.hpp" 34 35 // A self heal must always "upgrade" the address metadata bits in 36 // accordance with the metadata bits state machine, which has the 37 // valid state transitions as described below (where N is the GC 38 // cycle). 39 // 40 // Note the subtleness of overlapping GC cycles. Specifically that 41 // oops are colored Remapped(N) starting at relocation N and ending 42 // at marking N + 1. 
//
//              +--- Mark Start
//              | +--- Mark End
//              | |  +--- Relocate Start
//              | |  |  +--- Relocate End
//              | |  |  |
// Marked       |---N---|--N+1--|--N+2--|----
// Finalizable  |---N---|--N+1--|--N+2--|----
// Remapped ----|---N---|--N+1--|--N+2--|
//
// VALID STATE TRANSITIONS
//
//   Marked(N)       -> Remapped(N)
//                   -> Marked(N + 1)
//                   -> Finalizable(N + 1)
//
//   Finalizable(N)  -> Marked(N)
//                   -> Remapped(N)
//                   -> Marked(N + 1)
//                   -> Finalizable(N + 1)
//
//   Remapped(N)     -> Marked(N + 1)
//                   -> Finalizable(N + 1)
//
// PHASE VIEW
//
// ZPhaseMark
//   Load & Mark
//     Marked(N)         <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//                       <- Remapped(N - 1)
//                       <- Finalizable(N)
//
//   Mark(Finalizable)
//     Finalizable(N)    <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//                       <- Remapped(N - 1)
//
//   Load(AS_NO_KEEPALIVE)
//     Remapped(N - 1)   <- Finalizable(N - 1)
//
// ZPhaseMarkCompleted (Resurrection blocked)
//   Load & Load(AS_NO_KEEPALIVE) & KeepAlive
//     Marked(N)         <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//                       <- Remapped(N - 1)
//                       <- Finalizable(N)
//
// ZPhaseMarkCompleted (Resurrection unblocked)
//   Load & Load(AS_NO_KEEPALIVE)
//     Marked(N)         <- Finalizable(N)
//
// ZPhaseRelocate
//   Load & Load(AS_NO_KEEPALIVE)
//     Remapped(N)       <- Marked(N)
//                       <- Finalizable(N)

// Attempt to store the good address back into the oop location (*p) so that
// subsequent loads take the fast path. Uses a CAS loop because mutators and GC
// threads may race to heal the same location; per the state machine above, a
// heal must only ever "upgrade" the metadata bits, never downgrade them.
template <ZBarrierFastPath fast_path>
inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
  if (heal_addr == 0) {
    // Never heal with null since it interacts badly with reference processing.
    // A mutator clearing an oop would be similar to calling Reference.clear(),
    // which would make the reference non-discoverable or silently dropped
    // by the reference processor.
    return;
  }

  assert(!fast_path(addr), "Invalid self heal");
  assert(fast_path(heal_addr), "Invalid self heal");

  for (;;) {
    // Heal
    const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr);
    if (prev_addr == addr) {
      // Success
      return;
    }

    if (fast_path(prev_addr)) {
      // Must not self heal
      return;
    }

    // The oop location was healed by another barrier, but still needs upgrading.
    // Re-apply healing to make sure the oop is not left with weaker (remapped or
    // finalizable) metadata bits than what this barrier tried to apply.
    assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
    addr = prev_addr;
  }
}

// Generic barrier: take the fast path if the oop already has acceptable
// metadata bits, otherwise run the slow path and (when a location is given)
// self-heal the oop location with the resulting good address.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return ZOop::from_address(addr);
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    // p == NULL means there is no location to heal (oop-only barrier)
    self_heal<fast_path>(p, addr, good_addr);
  }

  return ZOop::from_address(good_addr);
}

// Variant of barrier() for weak loads: accepts weak-good oops on the fast
// path, and never marks through the oop, so healing uses the remapped
// (not marked) address.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    // The slow path returns a good/marked address or null, but we never mark
    // oops in a weak load barrier so we always heal with the remapped address.
    self_heal<fast_path>(p, addr, ZAddress::remapped_or_null(good_addr));
  }

  return ZOop::from_address(good_addr);
}

// Variant of barrier() for root oops, which are healed with a plain
// (non-atomic) store; see the comment below for why that is safe.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return;
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Non-atomic healing helps speed up root scanning. This is safe to do
  // since we are always healing roots in a safepoint, or under a lock,
  // which ensures we are never racing with mutators modifying roots while
  // we are healing them. It's also safe in case multiple GC threads try
  // to heal the same root if it is aligned, since they would always heal
  // the root in the same way and it does not matter in which order it
  // happens. For misaligned oops, there needs to be mutual exclusion.
  *p = ZOop::from_address(good_addr);
}

// Fast-path predicates, instantiated as the fast_path template argument
// of the barrier functions above.
inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}

inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}

inline bool ZBarrier::is_marked_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_marked_or_null(addr);
}

// Current GC phase queries
inline bool ZBarrier::during_mark() {
  return ZGlobalPhase == ZPhaseMark;
}

inline bool ZBarrier::during_relocate() {
  return ZGlobalPhase == ZPhaseRelocate;
}

//
// Load barrier
//
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  // No location to heal, hence the NULL oop*
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
  const oop o = *p;
  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

// Apply the load barrier to each element of an oop array
inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    load_barrier_on_oop_field(p);
  }
}

// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
// Verifies (in debug builds) that referent_addr really is the referent field
// of a valid java.lang.ref.Reference object.
inline void verify_on_weak(volatile oop* referent_addr) {
#ifdef ASSERT
  if (referent_addr != NULL) {
    uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset;
    oop obj = cast_to_oop(base);
    assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
    assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset), "Sanity");
  }
#endif
}

inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    // While resurrection is blocked, a weak load must not resurrect the
    // referent; use the weak slow path, but heal with a good address.
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

//
// Weak load barrier
//
inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
  assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
  const oop o = *p;
  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
  // No location to heal, hence the NULL oop*
  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
  // No location to heal, hence the NULL oop*
  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

//
// Is alive barrier
//
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_weak_oop(o) != NULL;
}

inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_phantom_oop(o) != NULL;
}

//
// Keep alive barrier
//
inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_oop(oop o) {
  const uintptr_t addr = ZOop::to_address(o);
  // Only accepts an already-good oop; during marking, additionally mark
  // through it to keep it alive.
  assert(ZAddress::is_good(addr), "Invalid address");

  if (during_mark()) {
    mark_barrier_on_oop_slow_path(addr);
  }
}

//
// Mark barrier
//
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
  const oop o = *p;

  if (finalizable) {
    // Finalizable marking accepts any marked oop on the fast path
    barrier<is_marked_or_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
  } else {
    const uintptr_t addr = ZOop::to_address(o);
    if (ZAddress::is_good(addr)) {
      // Mark through good oop
      mark_barrier_on_oop_slow_path(addr);
    } else {
      // Mark through bad oop
      barrier<is_good_or_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
    }
  }
}

// Apply the mark barrier to each element of an oop array
inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    mark_barrier_on_oop_field(p, finalizable);
  }
}

inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
}

inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o);
}

//
// Relocate barrier
//
inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
}

#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP