/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BARRIERSET_HPP
#define SHARE_VM_MEMORY_BARRIERSET_HPP

#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"
#include "asm/register.hpp"

// This class provides the interface between a barrier implementation and
// the rest of the system.

class MacroAssembler;

class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;
public:
  enum Name {
    ModRef,
    CardTableModRef,
    CardTableExtension,
    G1SATBCT,
    G1SATBCTLogging,
    ShenandoahBarrierSet,
    Other,
    Uninit
  };

  enum Flags {
    None                = 0,
    TargetUninitialized = 1
  };
protected:
  int _max_covered_regions;
  Name _kind;

public:

  BarrierSet() { _kind = Uninit; }
  // To get around prohibition on RTTI.
  BarrierSet::Name kind() { return _kind; }
  virtual bool is_a(BarrierSet::Name bsn) = 0;
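
  // Illustrative sketch only (not part of this interface): a caller that
  // needs a specific barrier set is expected to test the kind instead of
  // using RTTI.  The sketch assumes the usual Universe::heap()->barrier_set()
  // accessor; "bs" is a hypothetical name.
  //
  //   BarrierSet* bs = Universe::heap()->barrier_set();
  //   if (bs->is_a(BarrierSet::CardTableModRef)) {
  //     // safe to treat "bs" as a card-table style barrier set
  //   }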

  // These operations indicate what kind of barriers the BarrierSet has.
  virtual bool has_read_ref_barrier() = 0;
  virtual bool has_read_prim_barrier() = 0;
  virtual bool has_write_ref_barrier() = 0;
  virtual bool has_write_ref_pre_barrier() = 0;
  virtual bool has_write_prim_barrier() = 0;

  // These functions indicate whether a particular access of the given
  // kinds requires a barrier.
  virtual bool read_ref_needs_barrier(void* field) = 0;
  virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
  virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
                                        juint val1, juint val2) = 0;

  // The operations below provide a direct implementation of the
  // barrier set.  An interpreter loop, for example, could call these
  // directly, as appropriate.

  // Invoke the barrier, if any, necessary when reading the given ref field.
  virtual void read_ref_field(void* field) = 0;

  // Invoke the barrier, if any, necessary when reading the given primitive
  // "field" of "bytes" bytes.
  virtual void read_prim_field(HeapWord* field, size_t bytes) = 0;

  // Invoke the barrier, if any, necessary when writing "new_val" into the
  // given ref "field".
  // (For efficiency reasons, this operation is specialized for certain
  // barrier types.  Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  // First the pre-write versions...
  template <class T> inline void write_ref_field_pre(T* field, oop new_val);
private:
  // Keep this private so as to catch violations at build time.
  virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); }
protected:
  virtual void write_ref_field_pre_work(      oop* field, oop new_val) {}
  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {}
public:

  // ...then the post-write version.
  inline void write_ref_field(void* field, oop new_val, bool release = false);
protected:
  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
public:

  // Invoke the barrier, if any, necessary when writing the "bytes"-byte
  // value(s) "val1" (and "val2") into the primitive "field".
  virtual void write_prim_field(HeapWord* field, size_t bytes,
                                juint val1, juint val2) = 0;
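
  // Illustrative sketch only (not part of this interface): a runtime oop
  // store that honors both the pre- and the post-write barrier, roughly as
  // an interpreter or runtime helper might perform it.  "bs", "field_addr"
  // and "new_value" are hypothetical names, and the raw store in the middle
  // is shown only schematically.
  //
  //   bs->write_ref_field_pre(field_addr, new_value);  // e.g. SATB pre-barrier
  //   *field_addr = new_value;                         // the store itself (schematic)
  //   bs->write_ref_field(field_addr, new_value);      // e.g. card dirtying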

  // Operations on arrays, or general regions (e.g., for "clone") may be
  // optimized by some barriers.

  // These predicates tell whether such an optimization exists for
  // the particular barrier.
  virtual bool has_read_ref_array_opt() = 0;
  virtual bool has_read_prim_array_opt() = 0;
  virtual bool has_write_ref_array_pre_opt() { return true; }
  virtual bool has_write_ref_array_opt() = 0;
  virtual bool has_write_prim_array_opt() = 0;

  virtual bool has_read_region_opt() = 0;
  virtual bool has_write_region_opt() = 0;

  // These operations should assert false unless the corresponding operation
  // above returns true.  Otherwise, they should perform an appropriate
  // barrier for an array whose elements are all in the given memory region.
  virtual void read_ref_array(MemRegion mr) = 0;
  virtual void read_prim_array(MemRegion mr) = 0;
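
  // Illustrative sketch only (not part of this interface): callers are
  // expected to consult the matching has_*_opt() predicate before using one
  // of these entry points, e.g. ("bs" and "mr" are hypothetical names):
  //
  //   if (bs->has_read_ref_array_opt()) {
  //     bs->read_ref_array(mr);
  //   } else {
  //     // fall back to per-element read barriers
  //   }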

  // "length" below is the number of array elements being written.
  virtual void write_ref_array_pre(oop* dst, int length,
                                   bool dest_uninitialized = false) {}
  virtual void write_ref_array_pre(narrowOop* dst, int length,
                                   bool dest_uninitialized = false) {}
  // "count" below is the number of array elements being written, starting
  // at the address "start", which is not necessarily HeapWord-aligned.
  virtual void write_ref_array(HeapWord* start, size_t count);

  // Static versions, suitable for calling from generated code;
  // "count" is the number of array elements being written, starting with
  // "start", which is not necessarily HeapWord-aligned.
  static void static_write_ref_array_pre(HeapWord* start, size_t count);
  static void static_write_ref_array_post(HeapWord* start, size_t count);
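
  // Illustrative sketch only (not part of this interface): an oop-array copy
  // might bracket its element stores with the array barriers, roughly as
  // follows ("bs", "dst" and "length" are hypothetical, and the copy itself
  // is shown only schematically):
  //
  //   bs->write_ref_array_pre(dst, length);
  //   ... copy "length" oops into "dst" ...
  //   bs->write_ref_array((HeapWord*)dst, (size_t)length);
  //
  // The static_write_ref_array_pre/post pair above offers the same protocol
  // with a signature that is convenient to call from generated stubs.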

protected:
  virtual void write_ref_array_work(MemRegion mr) = 0;
public:
  virtual void write_prim_array(MemRegion mr) = 0;

  virtual void read_region(MemRegion mr) = 0;

  // (For efficiency reasons, this operation is specialized for certain
  // barrier types.  Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  inline void write_region(MemRegion mr);
protected:
  virtual void write_region_work(MemRegion mr) = 0;
public:

  // Some barrier sets create tables whose elements correspond to parts of
  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
  // normally reserve space for such tables, and commit parts of the table
  // "covering" parts of the heap that are committed.  The constructor is
  // passed the maximum number of independently committable subregions to
  // be covered, and the "resize_covered_region" function allows the
  // sub-parts of the heap to inform the barrier set of changes of their
  // sizes.
  BarrierSet(int max_covered_regions) :
    _max_covered_regions(max_covered_regions) {}

  // Inform the BarrierSet that the covered heap region described by
  // "new_region" has been resized (possibly from an empty region, for
  // initialization).
  virtual void resize_covered_region(MemRegion new_region) = 0;
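
  // Illustrative sketch only (not part of this interface): a space or
  // generation that grows or shrinks is expected to notify its barrier set,
  // e.g. ("bs", "base" and "new_end" are hypothetical names):
  //
  //   bs->resize_covered_region(MemRegion(base, new_end));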

  // If the barrier set imposes any alignment restrictions on boundaries
  // within the heap, this function tells whether they are met.
  virtual bool is_aligned(HeapWord* addr) = 0;

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;

  // Resolve the given oop for a read or a write, respectively.  The default
  // implementations return "src" unchanged; barrier sets that may maintain
  // more than one copy of an object (e.g. ShenandoahBarrierSet) override
  // these to return the copy that should be accessed.
  virtual oop read_barrier(oop src) {
    return src;
  }
  virtual oop write_barrier(oop src) {
    return src;
  }

  // Compare two object references for equality in a way that is aware of
  // the barriers above.
  virtual bool obj_equals(oop obj1, oop obj2);

  virtual bool obj_equals(narrowOop obj1, narrowOop obj2);
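
  // Illustrative sketch only (not part of this interface): with a barrier
  // set that may keep more than one copy of an object, reference comparison
  // should go through the barrier set rather than compare raw pointers
  // ("bs", "a" and "b" are hypothetical names):
  //
  //   if (bs->obj_equals(a, b)) { ... }   // rather than the raw  a == b
  //
  // which is expected to behave like comparing bs->read_barrier(a) with
  // bs->read_barrier(b).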

#ifndef CC_INTERP
  // Assembly-level versions of the barriers above, used by the template
  // interpreter.  The default implementations emit no code; barrier sets
  // that need them override these to emit the appropriate instruction
  // sequence via "masm".
  virtual void interpreter_read_barrier(MacroAssembler* masm, Register dst) {
    // Default implementation does nothing.
  }

  virtual void interpreter_read_barrier_not_null(MacroAssembler* masm, Register dst) {
    // Default implementation does nothing.
  }

  virtual void interpreter_write_barrier(MacroAssembler* masm, Register dst) {
    // Default implementation does nothing.
  }

  virtual void asm_acmp_barrier(MacroAssembler* masm, Register op1, Register op2) {
    // Default implementation does nothing.
  }
#endif
};

#endif // SHARE_VM_MEMORY_BARRIERSET_HPP