1 /*
  2  * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_VM_GC_SHARED_BARRIERSET_HPP
 26 #define SHARE_VM_GC_SHARED_BARRIERSET_HPP
 27 
 28 #include "gc/shared/barrierSetConfig.hpp"
 29 #include "memory/memRegion.hpp"
 30 #include "oops/access.hpp"
 31 #include "oops/accessBackend.hpp"
 32 #include "oops/oopsHierarchy.hpp"
 33 #include "utilities/fakeRttiSupport.hpp"
 34 #include "utilities/macros.hpp"
 35 #include "utilities/sizes.hpp"
 36 
 37 class BarrierSetAssembler;
 38 class BarrierSetC1;
 39 class BarrierSetC2;
 40 class JavaThread;
 41 class nmethod;
 42 class nmethodBarrier;
 43 
 44 // This class provides the interface between a barrier implementation and
 45 // the rest of the system.
 46 
class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;

  // The singleton barrier set used by this VM, installed via set_barrier_set().
  static BarrierSet* _barrier_set;

public:
  // One enum tag per barrier set class, generated from barrierSetConfig.hpp.
  enum Name {
#define BARRIER_SET_DECLARE_BS_ENUM(bs_name) bs_name ,
    FOR_EACH_BARRIER_SET_DO(BARRIER_SET_DECLARE_BS_ENUM)
#undef BARRIER_SET_DECLARE_BS_ENUM
    UnknownBS
  };

protected:
  // Fake RTTI support.  For a derived class T to participate
  // - T must have a corresponding Name entry.
  // - GetName<T> must be specialized to return the corresponding Name
  //   entry.
  // - If T is a base class, the constructor must have a FakeRtti
  //   parameter and pass it up to its base class, with the tag set
  //   augmented with the corresponding Name entry.
  // - If T is a concrete class, the constructor must create a
  //   FakeRtti object whose tag set includes the corresponding Name
  //   entry, and pass it up to its base class.
  typedef FakeRttiSupport<BarrierSet, Name> FakeRtti;

private:
  FakeRtti _fake_rtti;
  BarrierSetAssembler* _barrier_set_assembler; // assembler (stub/interpreter) barrier support
  BarrierSetC1* _barrier_set_c1;               // C1 compiler barrier support
  BarrierSetC2* _barrier_set_c2;               // C2 compiler barrier support

public:
  // Metafunction mapping a class derived from BarrierSet to the
  // corresponding Name enum tag.
  template<typename T> struct GetName;

  // Metafunction mapping a Name enum type to the corresponding
  // class derived from BarrierSet.
  template<BarrierSet::Name T> struct GetType;

  // Note: This is not presently the Name corresponding to the
  // concrete class of this object.
  BarrierSet::Name kind() const { return _fake_rtti.concrete_tag(); }

  // Test whether this object is of the type corresponding to bsn.
  bool is_a(BarrierSet::Name bsn) const { return _fake_rtti.has_tag(bsn); }

  // End of fake RTTI support.

protected:
  // Subclass constructors supply the compiler/assembler support objects
  // (typically built with the make_barrier_set_* helpers below) and the
  // fake-RTTI tag set identifying the concrete barrier set type.
  BarrierSet(BarrierSetAssembler* barrier_set_assembler,
             BarrierSetC1* barrier_set_c1,
             BarrierSetC2* barrier_set_c2,
             const FakeRtti& fake_rtti) :
    _fake_rtti(fake_rtti),
    _barrier_set_assembler(barrier_set_assembler),
    _barrier_set_c1(barrier_set_c1),
    _barrier_set_c2(barrier_set_c2) {}
  // Protected, non-virtual destructor: barrier sets are not deleted
  // through a BarrierSet* base pointer.
  ~BarrierSet() { }

  // Returns a new assembler support object, or NULL on Zero
  // (interpreter-only) builds, which generate no assembly.
  template <class BarrierSetAssemblerT>
  static BarrierSetAssembler* make_barrier_set_assembler() {
    return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
  }

  // Returns a new C1 support object, or NULL when C1 is not built in.
  template <class BarrierSetC1T>
  static BarrierSetC1* make_barrier_set_c1() {
    return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
  }

  // Returns a new C2 support object, or NULL when C2 is not built in.
  template <class BarrierSetC2T>
  static BarrierSetC2* make_barrier_set_c2() {
    return COMPILER2_PRESENT(new BarrierSetC2T()) NOT_COMPILER2(NULL);
  }

public:
  // Support for optimizing compilers to call the barrier set on slow path allocations
  // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
  // The allocation is safe to use iff it returns true. If not, the slow-path allocation
  // is redone until it succeeds. This can e.g. prevent allocations from the slow path
  // to be in old.
  // NOTE(review): the text above mentions a boolean result, but this hook
  // returns void — the comment looks stale; verify against the callers.
  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
  // Thread lifecycle hooks. Note the asymmetry: create/destroy take a
  // generic Thread*, attach/detach apply to JavaThreads only.
  virtual void on_thread_create(Thread* thread) {}
  virtual void on_thread_destroy(Thread* thread) {}
  virtual void on_thread_attach(JavaThread* thread) {}
  virtual void on_thread_detach(JavaThread* thread) {}
  virtual void make_parsable(JavaThread* thread) {}

  // nmethod entry barrier support
  virtual bool needs_nmethod_entry_barrier() const { return false; }
  virtual ByteSize nmethod_entry_barrier_state_thread_offset() const { return in_ByteSize(0); }
  // Declared only; no default implementation is provided in this header.
  virtual bool on_nmethod_entry_barrier(nmethod* nm, nmethodBarrier* nmbarrier);

public:
  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;

  // Access to the VM-global barrier set instance.
  static BarrierSet* barrier_set() { return _barrier_set; }
  static void set_barrier_set(BarrierSet* barrier_set);

  BarrierSetAssembler* barrier_set_assembler() {
    assert(_barrier_set_assembler != NULL, "should be set");
    return _barrier_set_assembler;
  }

  BarrierSetC1* barrier_set_c1() {
    assert(_barrier_set_c1 != NULL, "should be set");
    return _barrier_set_c1;
  }

  BarrierSetC2* barrier_set_c2() {
    assert(_barrier_set_c2 != NULL, "should be set");
    return _barrier_set_c2;
  }

  // The AccessBarrier of a BarrierSet subclass is called by the Access API
  // (cf. oops/access.hpp) to perform decorated accesses. GC implementations
  // may override these default access operations by declaring an
  // AccessBarrier class in its BarrierSet. Its accessors will then be
  // automatically resolved at runtime.
  //
  // In order to register a new FooBarrierSet::AccessBarrier with the Access API,
  // the following steps should be taken:
  // 1) Provide an enum "name" for the BarrierSet in barrierSetConfig.hpp
  // 2) Make sure the barrier set headers are included from barrierSetConfig.inline.hpp
  // 3) Provide specializations for BarrierSet::GetName and BarrierSet::GetType.
  template <DecoratorSet decorators, typename BarrierSetT>
  class AccessBarrier: protected RawAccessBarrier<decorators> {
  private:
    typedef RawAccessBarrier<decorators> Raw;

  public:
    // Primitive heap accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // not an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static T load_in_heap(T* addr) {
      return Raw::template load<T>(addr);
    }

    template <typename T>
    static T load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template load_at<T>(base, offset);
    }

    template <typename T>
    static void store_in_heap(T* addr, T value) {
      Raw::store(addr, value);
    }

    template <typename T>
    static void store_in_heap_at(oop base, ptrdiff_t offset, T value) {
      Raw::store_at(base, offset, value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
      return Raw::atomic_cmpxchg(new_value, addr, compare_value);
    }

    template <typename T>
    static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
    }

    template <typename T>
    static T atomic_xchg_in_heap(T new_value, T* addr) {
      return Raw::atomic_xchg(new_value, addr);
    }

    template <typename T>
    static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
      return Raw::atomic_xchg_at(new_value, base, offset);
    }

    template <typename T>
    static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                  size_t length) {
      Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                     dst_obj, dst_offset_in_bytes, dst_raw,
                     length);
    }

    // Heap oop accesses. These accessors get resolved when
    // IN_HEAP is set (e.g. when using the HeapAccess API), it is
    // an oop_* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    static oop oop_load_in_heap_at(oop base, ptrdiff_t offset) {
      return Raw::template oop_load_at<oop>(base, offset);
    }

    template <typename T>
    static void oop_store_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value) {
      Raw::oop_store_at(base, offset, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
    }

    static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
      return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
      return Raw::oop_atomic_xchg(new_value, addr);
    }

    static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
      return Raw::oop_atomic_xchg_at(new_value, base, offset);
    }

    template <typename T>
    static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                      size_t length) {
      return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                dst_obj, dst_offset_in_bytes, dst_raw,
                                length);
    }

    // Off-heap oop accesses. These accessors get resolved when
    // IN_HEAP is not set (e.g. when using the NativeAccess API), it is
    // an oop* overload, and the barrier strength is AS_NORMAL.
    template <typename T>
    static oop oop_load_not_in_heap(T* addr) {
      return Raw::template oop_load<oop>(addr);
    }

    template <typename T>
    static void oop_store_not_in_heap(T* addr, oop value) {
      Raw::oop_store(addr, value);
    }

    template <typename T>
    static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
      return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
    }

    template <typename T>
    static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
      return Raw::oop_atomic_xchg(new_value, addr);
    }

    // Clone barrier support
    static void clone_in_heap(oop src, oop dst, size_t size) {
      Raw::clone(src, dst, size);
    }

    static oop resolve(oop obj) {
      return Raw::resolve(obj);
    }

    static bool equals(oop o1, oop o2) {
      return Raw::equals(o1, o2);
    }
  };
};
317 
318 template<typename T>
319 inline T* barrier_set_cast(BarrierSet* bs) {
320   assert(bs->is_a(BarrierSet::GetName<T>::value), "wrong type of barrier set");
321   return static_cast<T*>(bs);
322 }
323 
324 #endif // SHARE_VM_GC_SHARED_BARRIERSET_HPP