/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;   // Unaligned access from unsafe
  bool _mismatched_access;  // Mismatched access from unsafe: byte read in integer array for instance
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;  // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,    // When is it safe to do this load?
         Memory,     // Chunk of memory is being loaded from
         Address,    // Actually address, derived from base
         ValueIn,    // Value to store
         OopStore    // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,    // Load has to acquire or be succeeded by MemBarAcquire.
                 release,    // Store has to release or be preceded by MemBarRelease.
                 seqcst,     // LoadStore has to have both acquire and release semantics.
                 unset       // The memory ordering is not set (used for testing)
  } MemOrd;
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The boolean field _depends_only_on_test
  // below records whether that node depends only on the dominating
  // test.
  // Methods used to build LoadNodes pass an argument of type enum
  // ControlDependency instead of a boolean because those methods
  // typically have multiple boolean parameters with default values:
  // passing the wrong boolean to one of these parameters by mistake
  // easily goes unnoticed.  With an enum, the compiler can check that
  // the type of a value and the type of the parameter match.
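  // Illustrative sketch (an assumption for exposition, not part of the
  // original header): if the control-dependency argument were a plain
  // bool, a call such as
  //   LoadNode::make(gvn, ctl, mem, adr, at, rt, T_INT, mo,
  //                  /*control_dependency*/ true, /*unaligned*/ false);
  // would still compile with the two trailing arguments swapped.  With
  // the enum, passing Pinned or DependsOnlyOnTest where a bool is
  // expected (or vice versa) is a type error the compiler reports.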
  enum ControlDependency {
    Pinned,
    DependsOnlyOnTest
  };
private:
  // LoadNode::hash() doesn't take the _depends_only_on_test field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place.  If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  bool _depends_only_on_test;

  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // loads that can be reordered from those that require acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

protected:
  virtual uint cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool unaligned = false, bool mismatched = false);

  virtual uint hash() const;    // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values.
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseTransform* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
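  // Concrete consequence (illustrative note, not from the original
  // header): a raw-pointer load such as a read of the TLS top field has
  // adr_type() == TypeRawPtr::BOTTOM, so the override below reports
  // false and the load keeps its control edge, pinning it below any
  // Safepoint that could change the raw memory it reads.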
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
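  // (Illustrative note, an interpretation rather than text from the
  // original header: a control edge on a klass load typically guards it
  // behind a preceding null or type check, so removing the edge could
  // let the load float above that check.)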
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // stores that can be reordered from those that require release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
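  // Illustrative note (assumption for exposition, not from the original
  // header): on 32-bit platforms a Java volatile long must not be
  // written as two 32-bit halves; make_atomic() below builds a
  // StoreLNode with this flag set so that matching selects a single
  // atomic 64-bit store.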

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode------------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store.
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           (_oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum { SCMEMPROJCON = (uint)-2 };
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == NULL)  return NULL; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
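// (Illustrative sketch of the typical use of these conditional stores,
// assumed for exposition rather than taken from the original header:
//   p     = LoadPLocked(mem, adr)             // load-locked prior value
//   flags = StorePConditional(adr, val, p)    // store if unchanged
// with a branch on the produced flags that retries on failure.)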
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};


//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
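  // (Illustrative: a barrier built with Compile::AliasIdxBot serializes
  // all memory slices (TypePtr::BOTTOM), while Compile::AliasIdxRaw
  // narrows it to raw memory (TypeRawPtr::BOTTOM).)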
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
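// (Illustrative note: together with MemBarAcquireLockNode above, this
// brackets the monitor region, acquire after FastLock and release before
// FastUnlock, so references cannot migrate out of the locked section.)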
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,   // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,  // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1   // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
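  // Usage sketch (illustrative, mirroring the accessors declared below;
  // 'alias_idx' and 'new_state' are hypothetical names):
  //   MergeMemNode* mm = MergeMemNode::make(mem);
  //   Node* slice = mm->memory_at(alias_idx);    // falls back to base
  //   mm->set_memory_at(alias_idx, new_state);   // install a new slice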
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second merge, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2 == NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory() ? _mm2->base_memory() : _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  //  for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  //  for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
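  //
  // A fuller, hedged sketch of the single-merge idiom above;
  // 'mm' and 'rewrite_slice' are assumed placeholders:
  //
  //   for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
  //     Node* old_slice = mms.memory();     // current non-empty slice
  //     int   idx       = mms.alias_idx();  // its alias index
  //     mms.set_memory(rewrite_slice(old_slice, idx)); // write back via the stream
  //   }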

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem == _mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2 == _mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node* mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true
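
  // Hedged sketch of the parallel idiom; 'a', 'b', and 'combine' are assumed
  // placeholders.  Note that next_non_empty2 also visits slices that are
  // non-empty only in the second merge, where is_empty() is true:
  //
  //   for (MergeMemStream mms(a, b); mms.next_non_empty2(); ) {
  //     Node* lhs = mms.force_memory();    // 'a' slice, even if empty here
  //     Node* rhs = mms.memory2();         // 'b' slice, always meaningful
  //     mms.set_memory(combine(lhs, rhs)); // write the result back into 'a'
  //   }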

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};

//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx == 2; }
  virtual const Type *bottom_type() const { return (AllocatePrefetchStyle == 3) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP