/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_BARRIERSET_HPP
#define SHARE_VM_GC_SHARED_BARRIERSET_HPP

#include "gc/shared/barrierSetConfig.hpp"
#include "memory/memRegion.hpp"
#include "oops/access.hpp"
#include "oops/accessBackend.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/fakeRttiSupport.hpp"

class JavaThread;

// This class provides the interface between a barrier implementation and
// the rest of the system.

class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;

  static BarrierSet* _bs;

public:
  enum Name {
#define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,
    FOR_EACH_BARRIER_SET_DO(BARRIER_SET_DECLARE_BS_ENUM)
#undef BARRIER_SET_DECLARE_BS_ENUM
    UnknownBS
  };

  static BarrierSet* barrier_set() { return _bs; }

protected:
  // Fake RTTI support.  For a derived class T to participate
  // - T must have a corresponding Name entry.
  // - GetName<T> must be specialized to return the corresponding Name
  //   entry.
  // - If T is a base class, the constructor must have a FakeRtti
  //   parameter and pass it up to its base class, with the tag set
  //   augmented with the corresponding Name entry.
  // - If T is a concrete class, the constructor must create a
  //   FakeRtti object whose tag set includes the corresponding Name
  //   entry, and pass it up to its base class.
  // (A sketch of both cases follows the typedef below.)
  typedef FakeRttiSupport<BarrierSet, Name> FakeRtti;
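
  // Illustrative sketch of the rules above. "FooBS", "FooBaseBS",
  // "FooBarrierSet" and "FooBaseBarrierSet" are hypothetical names, not
  // entries defined anywhere in this file:
  //
  //   // Concrete class: creates a FakeRtti carrying its own tag.
  //   class FooBarrierSet: public BarrierSet {
  //   public:
  //     FooBarrierSet() : BarrierSet(FakeRtti(BarrierSet::FooBS)) {}
  //   };
  //
  //   // Intermediate base class: accepts a FakeRtti and augments its tag set.
  //   class FooBaseBarrierSet: public BarrierSet {
  //   protected:
  //     FooBaseBarrierSet(const FakeRtti& fake_rtti)
  //       : BarrierSet(fake_rtti.add_tag(BarrierSet::FooBaseBS)) {}
  //   };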

private:
  FakeRtti _fake_rtti;

public:
  // Metafunction mapping a class derived from BarrierSet to the
  // corresponding Name enum tag.
  template<typename T> struct GetName;

  // Metafunction mapping a Name enum type to the corresponding
  // class derived from BarrierSet.
  template<BarrierSet::Name T> struct GetType;
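
  // For the hypothetical FooBarrierSet sketched above, the specializations
  // would look like this (each concrete barrier set header provides the
  // equivalent for its own class):
  //
  //   template<> struct BarrierSet::GetName<FooBarrierSet> {
  //     static const BarrierSet::Name value = BarrierSet::FooBS;
  //   };
  //   template<> struct BarrierSet::GetType<BarrierSet::FooBS> {
  //     typedef FooBarrierSet type;
  //   };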

  // Note: This is not presently the Name corresponding to the
  // concrete class of this object.
  BarrierSet::Name kind() const { return _fake_rtti.concrete_tag(); }

  // Test whether this object is of the type corresponding to bsn.
  bool is_a(BarrierSet::Name bsn) const { return _fake_rtti.has_tag(bsn); }

  // End of fake RTTI support.

protected:
  BarrierSet(const FakeRtti& fake_rtti) : _fake_rtti(fake_rtti) { }
  ~BarrierSet() { }

public:
  // Operations on arrays, or general regions (e.g., for "clone") may be
  // optimized by some barriers.

  // Below length is the # array elements being written
  virtual void write_ref_array_pre(oop* dst, int length,
                                   bool dest_uninitialized = false) {}
  virtual void write_ref_array_pre(narrowOop* dst, int length,
                                   bool dest_uninitialized = false) {}
  // Below count is the # array elements being written, starting
  // at the address "start", which may not necessarily be HeapWord-aligned
  inline void write_ref_array(HeapWord* start, size_t count);
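
  // A typical call sequence for writing "length" oops into array slots at
  // "dst" (a minimal sketch; the element copy itself is elided):
  //
  //   BarrierSet* bs = BarrierSet::barrier_set();
  //   bs->write_ref_array_pre(dst, length);                 // before the writes
  //   // ... copy "length" oops into dst ...
  //   bs->write_ref_array((HeapWord*)dst, (size_t)length);  // after the writes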

  // Static versions, suitable for calling from generated code;
  // count is # array elements being written, starting with "start",
  // which may not necessarily be HeapWord-aligned.
  static void static_write_ref_array_pre(HeapWord* start, size_t count);
  static void static_write_ref_array_post(HeapWord* start, size_t count);

  // Support for optimizing compilers to call the barrier set on slow path allocations
  // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  // Gives the barrier set a chance to act on the newly allocated object before
  // it is published, e.g. by deferring or eliding its initial card mark.
  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
  virtual void on_thread_attach(JavaThread* thread) {}
  virtual void on_thread_detach(JavaThread* thread) {}
  virtual void make_parsable(JavaThread* thread) {}

protected:
  virtual void write_ref_array_work(MemRegion mr) = 0;

public:
  // Inform the BarrierSet that the covered heap region has been changed to
  // new_region (possibly from a zero-sized region, for initialization).
  virtual void resize_covered_region(MemRegion new_region) = 0;

  // If the barrier set imposes any alignment restrictions on boundaries
  // within the heap, this function tells whether they are met.
  virtual bool is_aligned(HeapWord* addr) = 0;

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;

  static void set_bs(BarrierSet* bs) { _bs = bs; }

  // The AccessBarrier of a BarrierSet subclass is called by the Access API
  // (cf. oops/access.hpp) to perform decorated accesses. GC implementations
  // may override these default access operations by declaring an
  // AccessBarrier class in their BarrierSet. Its accessors will then be
  // automatically resolved at runtime.
  //
  // In order to register a new FooBarrierSet::AccessBarrier with the Access API,
  // the following steps should be taken:
  // 1) Provide an enum "name" for the BarrierSet in barrierSetConfig.hpp
  // 2) Make sure the barrier set headers are included from barrierSetConfig.inline.hpp
  // 3) Provide specializations for BarrierSet::GetName and BarrierSet::GetType.
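  //
  // A sketch of such an override ("FooBarrierSet" remains hypothetical): the
  // GC's AccessBarrier derives from this default one, overrides the accessors
  // it needs, and delegates the rest to its parent:
  //
  //   template <DecoratorSet decorators, typename BarrierSetT>
  //   class FooBarrierSet::AccessBarrier:
  //       public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
  //     typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Parent;
  //   public:
  //     template <typename T>
  //     static void oop_store_in_heap(T* addr, oop value) {
  //       // pre-write barrier work would go here
  //       Parent::oop_store_in_heap(addr, value);
  //       // post-write barrier work would go here
  //     }
  //   };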
  template <DecoratorSet decorators, typename BarrierSetT>
  class AccessBarrier: protected RawAccessBarrier<decorators> {
  private:
    typedef RawAccessBarrier<decorators> Raw;

  public:
    // Primitive heap accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // not an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static T load_in_heap(T* addr) {
      return Raw::template load<T>(addr);
    }
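
    // For example (cf. oops/access.hpp), a primitive read such as
    //
    //   jint v = HeapAccess<>::load(addr);
    //
    // resolves at runtime to load_in_heap of the installed barrier set,
    // since IN_HEAP is set and the access is not an oop access.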

    template <typename T>
    static T load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template load_at<T>(base, offset);
    }

    template <typename T>
    static void store_in_heap(T* addr, T value) {
      Raw::store(addr, value);
    }

    template <typename T>
    static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
      Raw::store_at(base, offset, value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
      return Raw::atomic_cmpxchg(new_value, addr, compare_value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
    }

    template <typename T>
    static T atomic_xchg_in_heap(T new_value, T* addr) {
      return Raw::atomic_xchg(new_value, addr);
    }

    template <typename T>
    static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
      return Raw::atomic_xchg_at(new_value, base, offset);
    }

    template <typename T>
    static bool arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
    }

    // Heap oop accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template oop_load_at<oop>(base, offset);
    }

    template <typename T>
    static void oop_store_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
      Raw::oop_store_at(base, offset, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
    }

    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
      return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
      return Raw::oop_atomic_xchg(new_value, addr);
    }

    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
      return Raw::oop_atomic_xchg_at(new_value, base, offset);
    }

    template <typename T>
    static bool oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
    }

    // Off-heap oop accesses. These accessors get resolved when
    // IN_HEAP is not set (e.g. when using the RootAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_not_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    template <typename T>
    static void oop_store_not_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
      return Raw::oop_atomic_xchg(new_value, addr);
    }

    // Clone barrier support
    static void clone_in_heap(oop src, oop dst, size_t size) {
      Raw::clone(src, dst, size);
    }
  };
};

template<typename T>
inline T* barrier_set_cast(BarrierSet* bs) {
  assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
  return static_cast<T*>(bs);
}
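
// Example usage (a sketch; "FooBarrierSet" is the hypothetical concrete
// barrier set from the comments above, assumed installed via set_bs):
//
//   FooBarrierSet* fbs =
//     barrier_set_cast<FooBarrierSet>(BarrierSet::barrier_set());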

#endif // SHARE_VM_GC_SHARED_BARRIERSET_HPP