1 /* 2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_GC_SHARED_C2_BARRIERSETC2_HPP 26 #define SHARE_GC_SHARED_C2_BARRIERSETC2_HPP 27 28 #include "memory/allocation.hpp" 29 #include "oops/accessDecorators.hpp" 30 #include "opto/loopnode.hpp" 31 #include "opto/matcher.hpp" 32 #include "opto/memnode.hpp" 33 #include "utilities/globalDefinitions.hpp" 34 35 // This means the access is mismatched. This means the value of an access 36 // is not equivalent to the value pointed to by the address. 37 const DecoratorSet C2_MISMATCHED = DECORATOR_LAST << 1; 38 // The access may not be aligned to its natural size. 39 const DecoratorSet C2_UNALIGNED = DECORATOR_LAST << 2; 40 // The atomic cmpxchg is weak, meaning that spurious false negatives are allowed, 41 // but never false positives. 42 const DecoratorSet C2_WEAK_CMPXCHG = DECORATOR_LAST << 3; 43 // This denotes that a load has control dependency. 
44 const DecoratorSet C2_CONTROL_DEPENDENT_LOAD = DECORATOR_LAST << 4; 45 // This denotes that a load that must be pinned. 46 const DecoratorSet C2_PINNED_LOAD = DECORATOR_LAST << 5; 47 // This denotes that the access is produced from the sun.misc.Unsafe intrinsics. 48 const DecoratorSet C2_UNSAFE_ACCESS = DECORATOR_LAST << 6; 49 // This denotes that the access mutates state. 50 const DecoratorSet C2_WRITE_ACCESS = DECORATOR_LAST << 7; 51 // This denotes that the access reads state. 52 const DecoratorSet C2_READ_ACCESS = DECORATOR_LAST << 8; 53 // A nearby allocation? 54 const DecoratorSet C2_TIGHLY_COUPLED_ALLOC = DECORATOR_LAST << 9; 55 // Loads and stores from an arraycopy being optimized 56 const DecoratorSet C2_ARRAY_COPY = DECORATOR_LAST << 10; 57 58 class Compile; 59 class ConnectionGraph; 60 class GraphKit; 61 class IdealKit; 62 class Node; 63 class PhaseGVN; 64 class PhaseMacroExpand; 65 class Type; 66 class TypePtr; 67 class Unique_Node_List; 68 69 // This class wraps a node and a type. 70 class C2AccessValue: public StackObj { 71 protected: 72 Node* _node; 73 const Type* _type; 74 75 public: 76 C2AccessValue(Node* node, const Type* type) : 77 _node(node), 78 _type(type) {} 79 80 Node* node() const { return _node; } 81 const Type* type() const { return _type; } 82 83 void set_node(Node* node) { _node = node; } 84 }; 85 86 // This class wraps a node and a pointer type. 87 class C2AccessValuePtr: public C2AccessValue { 88 89 public: 90 C2AccessValuePtr(Node* node, const TypePtr* type) : 91 C2AccessValue(node, reinterpret_cast<const Type*>(type)) {} 92 93 const TypePtr* type() const { return reinterpret_cast<const TypePtr*>(_type); } 94 }; 95 96 // This class wraps a bunch of context parameters thare are passed around in the 97 // BarrierSetC2 backend hierarchy, for loads and stores, to reduce boiler plate. 
class C2Access: public StackObj {
protected:
  DecoratorSet      _decorators; // Runtime + C2 decorator bits describing the access
  BasicType         _type;       // Basic type of the value being loaded/stored
  Node*             _base;       // Base of the access (the object for heap accesses)
  C2AccessValuePtr& _addr;       // Resolved address: node plus pointer type
  Node*             _raw_access; // Raw memory node once the access has been emitted

  // Normalizes the decorator set for this access; called from the concrete
  // subclass constructors (C2ParseAccess, C2OptAccess). Defined in the .cpp file.
  void fixup_decorators();

public:
  C2Access(DecoratorSet decorators,
           BasicType type, Node* base, C2AccessValuePtr& addr) :
    _decorators(decorators),
    _type(type),
    _base(base),
    _addr(addr),
    _raw_access(NULL)
  {}

  // Accessors for the wrapped context.
  DecoratorSet decorators() const { return _decorators; }
  Node* base() const { return _base; }
  C2AccessValuePtr& addr() const { return _addr; }
  BasicType type() const { return _type; }
  // True when the accessed value is a reference (object or array).
  bool is_oop() const { return _type == T_OBJECT || _type == T_ARRAY; }
  // True when the access bypasses GC barriers entirely (AS_RAW decorator).
  bool is_raw() const { return (_decorators & AS_RAW) != 0; }
  Node* raw_access() const { return _raw_access; }

  void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
  virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.

  // Derives the MemNode memory ordering for this access from the decorators;
  // defined in the .cpp file.
  MemNode::MemOrd mem_node_mo() const;
  // Whether the access requires an explicit CPU membar; defined in the .cpp file.
  bool needs_cpu_membar() const;

  // The GVN phase to use when creating nodes for this access; supplied by
  // the concrete subclass (parse-time or optimization-time).
  virtual PhaseGVN& gvn() const = 0;
  virtual bool is_parse_access() const { return false; }
  virtual bool is_opt_access() const { return false; }
};

// C2Access for parse time calls to the BarrierSetC2 backend.
class C2ParseAccess: public C2Access {
protected:
  GraphKit* _kit; // Parse-time graph construction context

  // Opaque per-compilation barrier set state; presumably the state created by
  // BarrierSetC2::create_barrier_state() — confirm in the .cpp file.
  void* barrier_set_state() const;

public:
  C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
                BasicType type, Node* base, C2AccessValuePtr& addr) :
    C2Access(decorators, type, base, addr),
    _kit(kit) {
    // Complete/normalize the decorators now that the access is fully formed.
    fixup_decorators();
  }

  GraphKit* kit() const { return _kit; }

  // Convenience accessor returning the barrier set state cast to a concrete
  // pointer type chosen by the caller.
  template <typename T>
  T barrier_set_state_as() const {
    return reinterpret_cast<T>(barrier_set_state());
  }

  virtual PhaseGVN& gvn() const;
  virtual bool is_parse_access() const { return true; }
};

// This class wraps a bunch of context parameters that are passed around in the
// BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boilerplate.
class C2AtomicParseAccess: public C2ParseAccess {
  Node* _memory;       // Memory state for the access's alias slice (set by set_memory())
  uint _alias_idx;     // Alias index (memory slice) of the access
  bool _needs_pinning; // Whether the atomic op's projections must be pinned; defaults to true

public:
  C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
                      Node* base, C2AccessValuePtr& addr, uint alias_idx) :
    C2ParseAccess(kit, decorators, type, base, addr),
    _memory(NULL),
    _alias_idx(alias_idx),
    _needs_pinning(true) {}

  // Set the memory node based on the current memory slice.
  virtual void set_memory();

  Node* memory() const { return _memory; }
  uint alias_idx() const { return _alias_idx; }
  bool needs_pinning() const { return _needs_pinning; }

  void set_needs_pinning(bool value) { _needs_pinning = value; }
};

// C2Access for optimization time calls to the BarrierSetC2 backend.
189 class C2OptAccess: public C2Access { 190 PhaseGVN& _gvn; 191 MergeMemNode* _mem; 192 Node* _ctl; 193 194 public: 195 C2OptAccess(PhaseGVN& gvn, Node* ctl, MergeMemNode* mem, DecoratorSet decorators, 196 BasicType type, Node* base, C2AccessValuePtr& addr) : 197 C2Access(decorators, type, base, addr), 198 _gvn(gvn), _mem(mem), _ctl(ctl) { 199 fixup_decorators(); 200 } 201 202 203 MergeMemNode* mem() const { return _mem; } 204 Node* ctl() const { return _ctl; } 205 // void set_mem(Node* mem) { _mem = mem; } 206 void set_ctl(Node* ctl) { _ctl = ctl; } 207 208 virtual PhaseGVN& gvn() const { return _gvn; } 209 virtual bool is_opt_access() const { return true; } 210 }; 211 212 213 // This is the top-level class for the backend of the Access API in C2. 214 // The top-level class is responsible for performing raw accesses. The 215 // various GC barrier sets inherit from the BarrierSetC2 class to sprinkle 216 // barriers into the accesses. 217 class BarrierSetC2: public CHeapObj<mtGC> { 218 protected: 219 virtual void resolve_address(C2Access& access) const; 220 virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const; 221 virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const; 222 223 virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val, 224 Node* new_val, const Type* val_type) const; 225 virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val, 226 Node* new_val, const Type* value_type) const; 227 virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const; 228 virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const; 229 void pin_atomic_op(C2AtomicParseAccess& access) const; 230 231 public: 232 // This is the entry-point for the backend to perform accesses through the Access API. 
233 virtual Node* store_at(C2Access& access, C2AccessValue& val) const; 234 virtual Node* load_at(C2Access& access, const Type* val_type) const; 235 236 virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val, 237 Node* new_val, const Type* val_type) const; 238 virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val, 239 Node* new_val, const Type* val_type) const; 240 virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const; 241 virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const; 242 243 virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const; 244 245 virtual Node* resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const { return n; } 246 247 virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes, 248 Node*& i_o, Node*& needgc_ctrl, 249 Node*& fast_oop_ctrl, Node*& fast_oop_rawmem, 250 intx prefetch_lines) const; 251 252 virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { return NULL; } 253 virtual Node* identity_node(PhaseGVN* phase, Node* n) const { return n; } 254 255 // These are general helper methods used by C2 256 enum ArrayCopyPhase { 257 Parsing, 258 Optimization, 259 Expansion 260 }; 261 virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const { return false; } 262 virtual void clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const; 263 264 // Support for GC barriers emitted during parsing 265 virtual bool has_load_barriers() const { return false; } 266 virtual bool is_gc_barrier_node(Node* node) const { return false; } 267 virtual Node* step_over_gc_barrier(Node* c) const { return c; } 268 269 // Support for macro expanded GC barriers 270 virtual void 
register_potential_barrier_node(Node* node) const { } 271 virtual void unregister_potential_barrier_node(Node* node) const { } 272 virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { } 273 virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {} 274 virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {} 275 virtual void add_users_to_worklist(Unique_Node_List* worklist) const {} 276 277 // Allow barrier sets to have shared state that is preserved across a compilation unit. 278 // This could for example comprise macro nodes to be expanded during macro expansion. 279 virtual void* create_barrier_state(Arena* comp_arena) const { return NULL; } 280 // If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be 281 // expanded later, then now is the time to do so. 282 virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const { return false; } 283 284 virtual bool has_special_unique_user(const Node* node) const { return false; } 285 286 enum CompilePhase { 287 BeforeOptimize, /* post_parse = true */ 288 BeforeExpand, /* post_parse = false */ 289 BeforeCodeGen 290 }; 291 virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const {} 292 293 virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const { return false; } 294 #ifdef ASSERT 295 virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const { return false; } 296 #endif 297 298 virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const { return false; } 299 300 virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const { return false; } 301 virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const { return false; } 302 virtual bool escape_has_out_with_unsafe_object(Node* n) const { return false; } 303 virtual 
bool escape_is_barrier_node(Node* n) const { return false; } 304 305 virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const { return false; }; 306 virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const { return false; }; 307 virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const { return false; } 308 309 virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const {} 310 virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const {} 311 312 }; 313 314 #endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP