1 /*
   2  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHARED_BARRIERSET_HPP
  26 #define SHARE_GC_SHARED_BARRIERSET_HPP
  27 
  28 #include "gc/shared/barrierSetConfig.hpp"
  29 #include "memory/memRegion.hpp"
  30 #include "oops/access.hpp"
  31 #include "oops/accessBackend.hpp"
  32 #include "oops/oopsHierarchy.hpp"
  33 #include "utilities/exceptions.hpp"
  34 #include "utilities/fakeRttiSupport.hpp"
  35 #include "utilities/macros.hpp"
  36 
  37 class BarrierSetAssembler;
  38 class BarrierSetC1;
  39 class BarrierSetC2;
  40 class BarrierSetNMethod;
  41 class JavaThread;
  42 
  43 // This class provides the interface between a barrier implementation and
  44 // the rest of the system.
  45 
class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;

  // The single process-wide barrier set instance, installed via
  // set_barrier_set() and queried via barrier_set().
  static BarrierSet* _barrier_set;

public:
  // One enum value per barrier set configured in barrierSetConfig.hpp,
  // plus UnknownBS as a sentinel.
  enum Name {
#define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,
    FOR_EACH_BARRIER_SET_DO(BARRIER_SET_DECLARE_BS_ENUM)
#undef BARRIER_SET_DECLARE_BS_ENUM
    UnknownBS
  };

protected:
  // Fake RTTI support.  For a derived class T to participate
  // - T must have a corresponding Name entry.
  // - GetName<T> must be specialized to return the corresponding Name
  //   entry.
  // - If T is a base class, the constructor must have a FakeRtti
  //   parameter and pass it up to its base class, with the tag set
  //   augmented with the corresponding Name entry.
  // - If T is a concrete class, the constructor must create a
  //   FakeRtti object whose tag set includes the corresponding Name
  //   entry, and pass it up to its base class.
  typedef FakeRttiSupport<BarrierSet, Name> FakeRtti;

private:
  FakeRtti _fake_rtti;
  // Compiler/assembler support objects for this barrier set.  Any of
  // these may be NULL when the corresponding compiler is excluded from
  // the build (see the make_barrier_set_* factory helpers below); the
  // public accessors assert where NULL is not expected.
  BarrierSetAssembler* _barrier_set_assembler;
  BarrierSetC1* _barrier_set_c1;
  BarrierSetC2* _barrier_set_c2;
  BarrierSetNMethod* _barrier_set_nmethod;

public:
  // Metafunction mapping a class derived from BarrierSet to the
  // corresponding Name enum tag.
  template<typename T> struct GetName;

  // Metafunction mapping a Name enum tag to the corresponding
  // class derived from BarrierSet.
  template<BarrierSet::Name T> struct GetType;

  // Note: This is not presently the Name corresponding to the
  // concrete class of this object.
  BarrierSet::Name kind() const { return _fake_rtti.concrete_tag(); }

  // Test whether this object is of the type corresponding to bsn.
  bool is_a(BarrierSet::Name bsn) const { return _fake_rtti.has_tag(bsn); }

  // End of fake RTTI support.

protected:
  // Subclasses supply their compiler support objects (typically built
  // with the make_barrier_set_* helpers) and their fake-RTTI tag set.
  BarrierSet(BarrierSetAssembler* barrier_set_assembler,
             BarrierSetC1* barrier_set_c1,
             BarrierSetC2* barrier_set_c2,
             BarrierSetNMethod* barrier_set_nmethod,
             const FakeRtti& fake_rtti) :
    _fake_rtti(fake_rtti),
    _barrier_set_assembler(barrier_set_assembler),
    _barrier_set_c1(barrier_set_c1),
    _barrier_set_c2(barrier_set_c2),
    _barrier_set_nmethod(barrier_set_nmethod) {}
  // Protected and non-virtual: BarrierSet objects are not deleted
  // through a base-class pointer.
  ~BarrierSet() { }

  // Factory helpers for the compiler support objects.  Each yields NULL
  // when the corresponding component is excluded from this build:
  // the assembler on Zero (interpreter-only) builds, C1/C2 support when
  // the respective compiler is not present.
  template <class BarrierSetAssemblerT>
  static BarrierSetAssembler* make_barrier_set_assembler() {
    return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
  }

  template <class BarrierSetC1T>
  static BarrierSetC1* make_barrier_set_c1() {
    return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
  }

  template <class BarrierSetC2T>
  static BarrierSetC2* make_barrier_set_c2() {
    return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
  }

  // Exception-throwing helpers for array copy barriers; implementations
  // are provided out of line (TRAPS marks them as potentially throwing).
  static void throw_array_null_pointer_store_exception(arrayOop src, arrayOop dst, TRAPS);
  static void throw_array_store_exception(arrayOop src, arrayOop dst, TRAPS);

public:
  // Support for optimizing compilers to call the barrier set on slow path allocations
  // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  // The allocation is safe to use iff it returns true. If not, the slow-path allocation
  // is redone until it succeeds. This can e.g. prevent allocations from the slow path
  // to be in old.
  // NOTE(review): the "returns true" remark above predates the current
  // void signature -- confirm against the callers before relying on it.
  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
  virtual void on_thread_create(Thread* thread) {}
  virtual void on_thread_destroy(Thread* thread) {}

  // These perform BarrierSet-related initialization/cleanup before the thread
  // is added to or removed from the corresponding set of threads. The
  // argument thread is the current thread. These are called either holding
  // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding
  // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the
  // caller. That locking ensures the operation is "atomic" with the list
  // modification w.r.t. operations that hold the NJTList_lock and either also
  // hold the Threads_lock or are at a safepoint.
  virtual void on_thread_attach(Thread* thread) {}
  virtual void on_thread_detach(Thread* thread) {}

  virtual void make_parsable(JavaThread* thread) {}

#ifdef CHECK_UNHANDLED_OOPS
  // Whether direct oop == oop comparison is permitted under
  // CHECK_UNHANDLED_OOPS; subclasses may override to return false.
  virtual bool oop_equals_operator_allowed() { return true; }
#endif

public:
  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;

  // Access to / installation of the process-wide barrier set instance.
  static BarrierSet* barrier_set() { return _barrier_set; }
  static void set_barrier_set(BarrierSet* barrier_set);

  BarrierSetAssembler* barrier_set_assembler() {
    assert(_barrier_set_assembler != NULL, "should be set");
    return _barrier_set_assembler;
  }

  BarrierSetC1* barrier_set_c1() {
    assert(_barrier_set_c1 != NULL, "should be set");
    return _barrier_set_c1;
  }

  BarrierSetC2* barrier_set_c2() {
    assert(_barrier_set_c2 != NULL, "should be set");
    return _barrier_set_c2;
  }

  // Unlike the accessors above, this one has no NULL assert; callers
  // must tolerate a NULL result.
  BarrierSetNMethod* barrier_set_nmethod() {
    return _barrier_set_nmethod;
  }

  // The AccessBarrier of a BarrierSet subclass is called by the Access API
  // (cf. oops/access.hpp) to perform decorated accesses. GC implementations
  // may override these default access operations by declaring an
  // AccessBarrier class in its BarrierSet. Its accessors will then be
  // automatically resolved at runtime.
  //
  // In order to register a new FooBarrierSet::AccessBarrier with the Access API,
  // the following steps should be taken:
  // 1) Provide an enum "name" for the BarrierSet in barrierSetConfig.hpp
  // 2) Make sure the barrier set headers are included from barrierSetConfig.inline.hpp
  // 3) Provide specializations for BarrierSet::GetName and BarrierSet::GetType.
  template <DecoratorSet decorators, typename BarrierSetT>
  class AccessBarrier: protected RawAccessBarrier<decorators> {
  private:
    // All default accessors below simply forward to the raw (no-barrier)
    // implementation; subclasses override the ones they need.
    typedef RawAccessBarrier<decorators> Raw;

  public:
    // Primitive heap accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // not an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static T load_in_heap(T* addr) {
      return Raw::template load<T>(addr);
    }

    template <typename T>
    static T load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template load_at<T>(base, offset);
    }

    template <typename T>
    static void store_in_heap(T* addr, T value) {
      Raw::store(addr, value);
    }

    template <typename T>
    static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
      Raw::store_at(base, offset, value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
      return Raw::atomic_cmpxchg(new_value, addr, compare_value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
    }

    template <typename T>
    static T atomic_xchg_in_heap(T new_value, T* addr) {
      return Raw::atomic_xchg(new_value, addr);
    }

    template <typename T>
    static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
      return Raw::atomic_xchg_at(new_value, base, offset);
    }

    template <typename T>
    static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                  size_t length) {
      Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                     dst_obj, dst_offset_in_bytes, dst_raw,
                     length);
    }

    // Heap oop accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template oop_load_at<oop>(base, offset);
    }

    template <typename T>
    static void oop_store_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
      Raw::oop_store_at(base, offset, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
    }

    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
      return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
      return Raw::oop_atomic_xchg(new_value, addr);
    }

    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
      return Raw::oop_atomic_xchg_at(new_value, base, offset);
    }

    // Declaration only; the implementation is provided out of line.
    template <typename T>
    static void oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                      size_t length);

    // Off-heap oop accesses. These accessors get resolved when
    // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
    // an oop* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_not_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    template <typename T>
    static void oop_store_not_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
      return Raw::oop_atomic_xchg(new_value, addr);
    }

    // Clone barrier support
    static void clone_in_heap(oop src, oop dst, size_t size) {
      Raw::clone(src, dst, size);
    }

    // Default forwards to the raw implementation; GCs with to-space
    // invariants may override with a real resolution step.
    static oop resolve(oop obj) {
      return Raw::resolve(obj);
    }

    // Default oop equality check; forwards to the raw implementation.
    static bool equals(oop o1, oop o2) {
      return Raw::equals(o1, o2);
    }
  };
};
 331 
 332 template<typename T>
 333 inline T* barrier_set_cast(BarrierSet* bs) {
 334   assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
 335   return static_cast<T*>(bs);
 336 }
 337 
 338 #endif // SHARE_GC_SHARED_BARRIERSET_HPP