/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BARRIERSET_HPP
#define SHARE_VM_MEMORY_BARRIERSET_HPP

#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"

// This class provides the interface between a barrier implementation and
// the rest of the system.
//
// A BarrierSet encapsulates the read/write barriers that a garbage
// collector requires around accesses to heap fields, arrays, and regions.
// Concrete subclasses (enumerated by Name below) implement the pure
// virtual hooks; several frequently-used entry points are non-virtual
// inline functions that are specialized per barrier type for efficiency
// and dispatch to the corresponding virtual "_work" hooks.

class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;
public:
  // Identifies the concrete barrier set implementation.  Used instead of
  // RTTI -- see kind() and is_a() below.
  enum Name {
    ModRef,
    CardTableModRef,
    CardTableExtension,
    G1SATBCT,
    G1SATBCTLogging,
    Other,
    Uninit
  };

  // Flags describing the target of a barriered write.
  // NOTE(review): TargetUninitialized presumably corresponds to the
  // dest_uninitialized argument of write_ref_array_pre below (i.e. the
  // destination holds no previous values a pre-barrier would need) --
  // confirm against callers.
  enum Flags {
    None                = 0,
    TargetUninitialized = 1
  };
protected:
  // Some barrier sets create tables whose elements correspond to parts of
  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
  // normally reserve space for such tables, and commit parts of the table
  // "covering" parts of the heap that are committed.  At most one covered
  // region per generation is needed.
  static const int _max_covered_regions = 2;

  // The concrete kind of this barrier set; assigned once at construction.
  Name _kind;

  // Constructor and destructor are protected: only concrete subclasses
  // may be created.  The destructor is intentionally non-virtual; keeping
  // it protected makes deletion through a BarrierSet* ill-formed outside
  // the class hierarchy, so no virtual destructor is needed.
  BarrierSet(Name kind) { _kind = kind; }
  ~BarrierSet() { }

public:

  // To get around prohibition on RTTI.
  BarrierSet::Name kind() { return _kind; }
  virtual bool is_a(BarrierSet::Name bsn) = 0;

  // These operations indicate what kind of barriers the BarrierSet has.
  virtual bool has_read_ref_barrier() = 0;
  virtual bool has_read_prim_barrier() = 0;
  virtual bool has_write_ref_barrier() = 0;
  virtual bool has_write_ref_pre_barrier() = 0;
  virtual bool has_write_prim_barrier() = 0;

  // These functions indicate whether a particular access of the given
  // kinds requires a barrier.
  virtual bool read_ref_needs_barrier(void* field) = 0;
  virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
  virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
                                        juint val1, juint val2) = 0;

  // The first four operations provide a direct implementation of the
  // barrier set.  An interpreter loop, for example, could call these
  // directly, as appropriate.

  // Invoke the barrier, if any, necessary when reading the given ref field.
  virtual void read_ref_field(void* field) = 0;

  // Invoke the barrier, if any, necessary when reading the given primitive
  // "field" of "bytes" bytes in "obj".
  virtual void read_prim_field(HeapWord* field, size_t bytes) = 0;

  // Invoke the barrier, if any, necessary when writing "new_val" into the
  // ref field at "offset" in "obj".
  // (For efficiency reasons, this operation is specialized for certain
  // barrier types.  Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  // First the pre-write versions...
  template <class T> inline void write_ref_field_pre(T* field, oop new_val);
private:
  // Keep this private so as to catch violations at build time.
  // (A call that falls through to the untyped void* overload, rather than
  // one of the typed overloads below, fails the guarantee at runtime.)
  virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); };
protected:
  virtual void write_ref_field_pre_work(      oop* field, oop new_val) {};
  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
public:

  // ...then the post-write version.
  // NOTE(review): "release" presumably requests releasing-store semantics
  // for the barrier's own store(s); the exact meaning is defined by each
  // subclass's write_ref_field_work override -- confirm there.
  inline void write_ref_field(void* field, oop new_val, bool release = false);
protected:
  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
public:

  // Invoke the barrier, if any, necessary when writing the "bytes"-byte
  // value(s) "val1" (and "val2") into the primitive "field".
  virtual void write_prim_field(HeapWord* field, size_t bytes,
                                juint val1, juint val2) = 0;

  // Operations on arrays, or general regions (e.g., for "clone") may be
  // optimized by some barriers.

  // The first six operations tell whether such an optimization exists for
  // the particular barrier.
  virtual bool has_read_ref_array_opt() = 0;
  virtual bool has_read_prim_array_opt() = 0;
  virtual bool has_write_ref_array_pre_opt() { return true; }
  virtual bool has_write_ref_array_opt() = 0;
  virtual bool has_write_prim_array_opt() = 0;

  virtual bool has_read_region_opt() = 0;
  virtual bool has_write_region_opt() = 0;

  // These operations should assert false unless the corresponding operation
  // above returns true.  Otherwise, they should perform an appropriate
  // barrier for an array whose elements are all in the given memory region.
  virtual void read_ref_array(MemRegion mr) = 0;
  virtual void read_prim_array(MemRegion mr) = 0;

  // Below length is the # array elements being written
  virtual void write_ref_array_pre(oop* dst, int length,
                                   bool dest_uninitialized = false) {}
  virtual void write_ref_array_pre(narrowOop* dst, int length,
                                   bool dest_uninitialized = false) {}
  // Below count is the # array elements being written, starting
  // at the address "start", which may not necessarily be HeapWord-aligned
  inline void write_ref_array(HeapWord* start, size_t count);

  // Static versions, suitable for calling from generated code;
  // count is # array elements being written, starting with "start",
  // which may not necessarily be HeapWord-aligned.
  static void static_write_ref_array_pre(HeapWord* start, size_t count);
  static void static_write_ref_array_post(HeapWord* start, size_t count);

protected:
  virtual void write_ref_array_work(MemRegion mr) = 0;
public:
  virtual void write_prim_array(MemRegion mr) = 0;

  virtual void read_region(MemRegion mr) = 0;

  // (For efficiency reasons, this operation is specialized for certain
  // barrier types.  Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  inline void write_region(MemRegion mr);
protected:
  virtual void write_region_work(MemRegion mr) = 0;
public:
  // Inform the BarrierSet that the covered heap region that starts
  // with "base" has been changed to have the given size (possibly from 0,
  // for initialization.)
  virtual void resize_covered_region(MemRegion new_region) = 0;

  // If the barrier set imposes any alignment restrictions on boundaries
  // within the heap, this function tells whether they are met.
  virtual bool is_aligned(HeapWord* addr) = 0;

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;
};

#endif // SHARE_VM_MEMORY_BARRIERSET_HPP