1 /*
   2  * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
  25 #define SHARE_GC_Z_ZBARRIER_INLINE_HPP
  26 
  27 #include "gc/z/zAddress.inline.hpp"
  28 #include "gc/z/zBarrier.hpp"
  29 #include "gc/z/zOop.inline.hpp"
  30 #include "gc/z/zResurrection.inline.hpp"
  31 #include "runtime/atomic.hpp"
  32 
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  uintptr_t addr = ZOop::to_address(o);

retry:
  // Fast path: the loaded address already satisfies the fast-path
  // predicate, so no barrier work (and no healing) is needed.
  if (fast_path(addr)) {
    return ZOop::to_oop(addr);
  }

  // Slow path: produces the good address for this oop.
  const uintptr_t good_addr = slow_path(addr);

  // Self heal, but only if the address was actually updated by the slow path,
  // which might not be the case, e.g. when marking through an already good oop.
  // p is NULL when the caller has no field to heal (value-only barriers).
  if (p != NULL && good_addr != addr) {
    const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. If this oop was updated by a
      // weak barrier the new oop might not be good, in which case we need
      // to re-apply this barrier. Retry with the newly observed address.
      addr = prev_addr;
      goto retry;
    }
  }

  return ZOop::to_oop(good_addr);
}
  61 
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::to_oop(ZAddress::good_or_null(addr));
  }

  // Slow path
  uintptr_t good_addr = slow_path(addr);

  // Self heal unless the address returned from the slow path is null,
  // in which case resurrection was blocked and we must let the reference
  // processor clear the oop. Mutators are not allowed to clear oops in
  // these cases, since that would be similar to calling Reference.clear(),
  // which would make the reference non-discoverable or silently dropped
  // by the reference processor.
  if (p != NULL && good_addr != 0) {
    // The slow path returns a good/marked address, but we never mark oops
    // in a weak load barrier so we always self heal with the remapped address.
    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
    const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. The new
      // oop is guaranteed to be weak good or null.
      assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");

      // Return the good address instead of the weak good address
      // to ensure that the currently active heap view is used.
      // Note: no retry here, unlike barrier(), since the overwritten
      // value is already weak good or null.
      good_addr = ZAddress::good_or_null(prev_addr);
    }
  }

  return ZOop::to_oop(good_addr);
}
 100 
 101 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
 102 inline void ZBarrier::root_barrier(oop* p, oop o) {
 103   const uintptr_t addr = ZOop::to_address(o);
 104 
 105   // Fast path
 106   if (fast_path(addr)) {
 107     return;
 108   }
 109 
 110   // Slow path
 111   const uintptr_t good_addr = slow_path(addr);
 112 
 113   // Non-atomic healing helps speed up root scanning. This is safe to do
 114   // since we are always healing roots in a safepoint, which means we are
 115   // never racing with mutators modifying roots while we are healing them.
 116   // It's also safe in case multiple GC threads try to heal the same root,
 117   // since they would always heal the root in the same way and it does not
 118   // matter in which order it happens.
 119   *p = ZOop::to_oop(good_addr);
 120 }
 121 
 122 inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
 123   return ZAddress::is_null(addr);
 124 }
 125 
 126 inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
 127   return ZAddress::is_good_or_null(addr);
 128 }
 129 
 130 inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
 131   return ZAddress::is_weak_good_or_null(addr);
 132 }
 133 
// Checks the resurrection blocked state and, when a field pointer is
// available, reloads the oop through it into *o. Note that o is an
// in/out parameter: the caller's preloaded oop may be replaced.
inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
  const bool is_blocked = ZResurrection::is_blocked();

  // Reload oop after checking the resurrection blocked state. This is
  // done to prevent a race where we first load an oop, which is logically
  // null but not yet cleared, then this oop is cleared by the reference
  // processor and resurrection is unblocked. At this point the mutator
  // would see the unblocked state and pass this invalid oop through the
  // normal barrier path, which would incorrectly try to mark this oop.
  // The order (check blocked state, then reload) is therefore essential.
  if (p != NULL) {
    // First assign to reloaded_o to avoid compiler warning about
    // implicit dereference of volatile oop.
    const oop reloaded_o = *p;
    *o = reloaded_o;
  }

  return is_blocked;
}
 152 
 153 //
 154 // Load barrier
 155 //
 156 inline oop ZBarrier::load_barrier_on_oop(oop o) {
 157   return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
 158 }
 159 
 160 inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
 161   const oop o = *p;
 162   return load_barrier_on_oop_field_preloaded(p, o);
 163 }
 164 
 165 inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
 166   return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
 167 }
 168 
 169 inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
 170   for (volatile const oop* const end = p + length; p < end; p++) {
 171     load_barrier_on_oop_field(p);
 172   }
 173 }
 174 
 175 inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
 176   if (is_resurrection_blocked(p, &o)) {
 177     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
 178   }
 179 
 180   return load_barrier_on_oop_field_preloaded(p, o);
 181 }
 182 
 183 inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
 184   if (is_resurrection_blocked(p, &o)) {
 185     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
 186   }
 187 
 188   return load_barrier_on_oop_field_preloaded(p, o);
 189 }
 190 
 191 //
 192 // Weak load barrier
 193 //
 194 inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
 195   return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
 196 }
 197 
 198 inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
 199   return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
 200 }
 201 
 202 inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
 203   const oop o = *p;
 204   return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
 205 }
 206 
 207 inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
 208   if (is_resurrection_blocked(p, &o)) {
 209     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
 210   }
 211 
 212   return weak_load_barrier_on_oop_field_preloaded(p, o);
 213 }
 214 
 215 inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
 216   return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
 217 }
 218 
 219 inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
 220   const oop o = *p;
 221   return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
 222 }
 223 
 224 inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
 225   if (is_resurrection_blocked(p, &o)) {
 226     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
 227   }
 228 
 229   return weak_load_barrier_on_oop_field_preloaded(p, o);
 230 }
 231 
 232 //
 233 // Is alive barrier
 234 //
 235 inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
 236   // Check if oop is logically non-null. This operation
 237   // is only valid when resurrection is blocked.
 238   assert(ZResurrection::is_blocked(), "Invalid phase");
 239   return weak_load_barrier_on_weak_oop(o) != NULL;
 240 }
 241 
 242 inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
 243   // Check if oop is logically non-null. This operation
 244   // is only valid when resurrection is blocked.
 245   assert(ZResurrection::is_blocked(), "Invalid phase");
 246   return weak_load_barrier_on_phantom_oop(o) != NULL;
 247 }
 248 
 249 //
 250 // Keep alive barrier
 251 //
 252 inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
 253   // This operation is only valid when resurrection is blocked.
 254   assert(ZResurrection::is_blocked(), "Invalid phase");
 255   const oop o = *p;
 256   barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
 257 }
 258 
 259 inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
 260   // This operation is only valid when resurrection is blocked.
 261   assert(ZResurrection::is_blocked(), "Invalid phase");
 262   const oop o = *p;
 263   barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
 264 }
 265 
 266 //
 267 // Mark barrier
 268 //
 269 inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
 270   // The fast path only checks for null since the GC worker
 271   // threads doing marking wants to mark through good oops.
 272   const oop o = *p;
 273 
 274   if (finalizable) {
 275     barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
 276   } else {
 277     barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
 278   }
 279 }
 280 
 281 inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
 282   for (volatile const oop* const end = p + length; p < end; p++) {
 283     mark_barrier_on_oop_field(p, finalizable);
 284   }
 285 }
 286 
 287 inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
 288   const oop o = *p;
 289   root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
 290 }
 291 
 292 //
 293 // Relocate barrier
 294 //
 295 inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
 296   const oop o = *p;
 297   root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
 298 }
 299 
 300 #endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP