1 /* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_OPTO_MEMNODE_HPP 26 #define SHARE_VM_OPTO_MEMNODE_HPP 27 28 #include "opto/multnode.hpp" 29 #include "opto/node.hpp" 30 #include "opto/opcodes.hpp" 31 #include "opto/type.hpp" 32 33 // Portions of code courtesy of Clifford Click 34 35 class MultiNode; 36 class PhaseCCP; 37 class PhaseTransform; 38 39 //------------------------------MemNode---------------------------------------- 40 // Load or Store, possibly throwing a NULL pointer exception 41 class MemNode : public Node { 42 private: 43 bool _unaligned_access; // Unaligned access from unsafe 44 bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance 45 protected: 46 #ifdef ASSERT 47 const TypePtr* _adr_type; // What kind of memory is being addressed? 48 #endif 49 virtual uint size_of() const; 50 public: 51 enum { Control, // When is it safe to do this load? 52 Memory, // Chunk of memory is being loaded from 53 Address, // Actually address, derived from base 54 ValueIn, // Value to store 55 OopStore // Preceeding oop store, only in StoreCM 56 }; 57 typedef enum { unordered = 0, 58 acquire, // Load has to acquire or be succeeded by MemBarAcquire. 59 release, // Store has to release or be preceded by MemBarRelease. 60 seqcst, // LoadStore has to have both acquire and release semantics. 61 unset // The memory ordering is not set (used for testing) 62 } MemOrd; 63 protected: 64 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) 65 : Node(c0,c1,c2 ), _unaligned_access(false), _mismatched_access(false) { 66 init_class_id(Class_Mem); 67 debug_only(_adr_type=at; adr_type();) 68 } 69 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) 70 : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) { 71 init_class_id(Class_Mem); 72 debug_only(_adr_type=at; adr_type();) 73 } 74 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) 75 : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) { 76 init_class_id(Class_Mem); 77 debug_only(_adr_type=at; adr_type();) 78 } 79 80 virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; } 81 82 public: 83 // Helpers for the optimizer. Documented in memnode.cpp. 84 static bool detect_ptr_independence(Node* p1, AllocateNode* a1, 85 Node* p2, AllocateNode* a2, 86 PhaseTransform* phase); 87 static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast); 88 89 static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase); 90 static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase); 91 // This one should probably be a phase-specific function: 92 static bool all_controls_dominate(Node* dom, Node* sub); 93 94 virtual const class TypePtr *adr_type() const; // returns bottom_type of address 95 96 // Shared code for Ideal methods: 97 Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit NULL. 98 99 // Helper function for adr_type() implementations. 100 static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL); 101 102 // Raw access function, to allow copying of adr_type efficiently in 103 // product builds and retain the debug info for debug builds. 104 const TypePtr *raw_adr_type() const { 105 #ifdef ASSERT 106 return _adr_type; 107 #else 108 return 0; 109 #endif 110 } 111 112 // Map a load or store opcode to its corresponding store opcode. 113 // (Return -1 if unknown.) 114 virtual int store_Opcode() const { return -1; } 115 116 // What is the type of the value in memory? (T_VOID mean "unspecified".) 117 virtual BasicType memory_type() const = 0; 118 virtual int memory_size() const { 119 #ifdef ASSERT 120 return type2aelembytes(memory_type(), true); 121 #else 122 return type2aelembytes(memory_type()); 123 #endif 124 } 125 126 // Search through memory states which precede this node (load or store). 127 // Look for an exact match for the address, with no intervening 128 // aliased stores. 129 Node* find_previous_store(PhaseTransform* phase); 130 131 // Can this node (load or store) accurately see a stored value in 132 // the given memory state? (The state may or may not be in(Memory).) 133 Node* can_see_stored_value(Node* st, PhaseTransform* phase) const; 134 135 void set_unaligned_access() { _unaligned_access = true; } 136 bool is_unaligned_access() const { return _unaligned_access; } 137 void set_mismatched_access() { _mismatched_access = true; } 138 bool is_mismatched_access() const { return _mismatched_access; } 139 140 #ifndef PRODUCT 141 static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st); 142 virtual void dump_spec(outputStream *st) const; 143 #endif 144 }; 145 146 //------------------------------LoadNode--------------------------------------- 147 // Load value; requires Memory and Address 148 class LoadNode : public MemNode { 149 public: 150 // Some loads (from unsafe) should be pinned: they don't depend only 151 // on the dominating test. The boolean field _depends_only_on_test 152 // below records whether that node depends only on the dominating 153 // test. 154 // Methods used to build LoadNodes pass an argument of type enum 155 // ControlDependency instead of a boolean because those methods 156 // typically have multiple boolean parameters with default values: 157 // passing the wrong boolean to one of these parameters by mistake 158 // goes easily unnoticed. Using an enum, the compiler can check that 159 // the type of a value and the type of the parameter match. 160 enum ControlDependency { 161 Pinned, 162 DependsOnlyOnTest 163 }; 164 private: 165 // LoadNode::hash() doesn't take the _depends_only_on_test field 166 // into account: If the graph already has a non-pinned LoadNode and 167 // we add a pinned LoadNode with the same inputs, it's safe for GVN 168 // to replace the pinned LoadNode with the non-pinned LoadNode, 169 // otherwise it wouldn't be safe to have a non pinned LoadNode with 170 // those inputs in the first place. If the graph already has a 171 // pinned LoadNode and we add a non pinned LoadNode with the same 172 // inputs, it's safe (but suboptimal) for GVN to replace the 173 // non-pinned LoadNode by the pinned LoadNode. 174 ControlDependency _control_dependency; 175 176 // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish 177 // loads that can be reordered, and such requiring acquire semantics to 178 // adhere to the Java specification. The required behaviour is stored in 179 // this field. 180 const MemOrd _mo; 181 182 protected: 183 virtual uint cmp(const Node &n) const; 184 virtual uint size_of() const; // Size is bigger 185 // Should LoadNode::Ideal() attempt to remove control edges? 186 virtual bool can_remove_control() const; 187 const Type* const _type; // What kind of value is loaded? 188 189 virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const; 190 public: 191 192 LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency) 193 : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _control_dependency(control_dependency) { 194 init_class_id(Class_Load); 195 } 196 inline bool is_unordered() const { return !is_acquire(); } 197 inline bool is_acquire() const { 198 assert(_mo == unordered || _mo == acquire, "unexpected"); 199 return _mo == acquire; 200 } 201 inline bool is_unsigned() const { 202 int lop = Opcode(); 203 return (lop == Op_LoadUB) || (lop == Op_LoadUS); 204 } 205 206 // Polymorphic factory method: 207 static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr, 208 const TypePtr* at, const Type *rt, BasicType bt, 209 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, 210 bool unaligned = false, bool mismatched = false); 211 212 virtual uint hash() const; // Check the type 213 214 // Handle algebraic identities here. If we have an identity, return the Node 215 // we are equivalent to. We look for Load of a Store. 216 virtual Node* Identity(PhaseGVN* phase); 217 218 // If the load is from Field memory and the pointer is non-null, it might be possible to 219 // zero out the control input. 220 // If the offset is constant and the base is an object allocation, 221 // try to hook me up to the exact initializing store. 222 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 223 224 // Split instance field load through Phi. 225 Node* split_through_phi(PhaseGVN *phase); 226 227 // Recover original value from boxed values 228 Node *eliminate_autobox(PhaseGVN *phase); 229 230 // Compute a new Type for this node. Basically we just do the pre-check, 231 // then call the virtual add() to set the type. 232 virtual const Type* Value(PhaseGVN* phase) const; 233 234 // Common methods for LoadKlass and LoadNKlass nodes. 235 const Type* klass_value_common(PhaseGVN* phase) const; 236 Node* klass_identity_common(PhaseGVN* phase); 237 238 virtual uint ideal_reg() const; 239 virtual const Type *bottom_type() const; 240 // Following method is copied from TypeNode: 241 void set_type(const Type* t) { 242 assert(t != NULL, "sanity"); 243 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); 244 *(const Type**)&_type = t; // cast away const-ness 245 // If this node is in the hash table, make sure it doesn't need a rehash. 246 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); 247 } 248 const Type* type() const { assert(_type != NULL, "sanity"); return _type; }; 249 250 // Do not match memory edge 251 virtual uint match_edge(uint idx) const; 252 253 // Map a load opcode to its corresponding store opcode. 254 virtual int store_Opcode() const = 0; 255 256 // Check if the load's memory input is a Phi node with the same control. 257 bool is_instance_field_load_with_local_phi(Node* ctrl); 258 259 Node* convert_to_unsigned_load(PhaseGVN& gvn); 260 Node* convert_to_signed_load(PhaseGVN& gvn); 261 262 #ifndef PRODUCT 263 virtual void dump_spec(outputStream *st) const; 264 #endif 265 #ifdef ASSERT 266 // Helper function to allow a raw load without control edge for some cases 267 static bool is_immutable_value(Node* adr); 268 #endif 269 protected: 270 const Type* load_array_final_field(const TypeKlassPtr *tkls, 271 ciKlass* klass) const; 272 273 Node* can_see_arraycopy_value(Node* st, PhaseTransform* phase) const; 274 275 // depends_only_on_test is almost always true, and needs to be almost always 276 // true to enable key hoisting & commoning optimizations. However, for the 277 // special case of RawPtr loads from TLS top & end, and other loads performed by 278 // GC barriers, the control edge carries the dependence preventing hoisting past 279 // a Safepoint instead of the memory edge. (An unfortunate consequence of having 280 // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes 281 // which produce results (new raw memory state) inside of loops preventing all 282 // manner of other optimizations). Basically, it's ugly but so is the alternative. 283 // See comment in macro.cpp, around line 125 expand_allocate_common(). 284 virtual bool depends_only_on_test() const { 285 return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest; 286 } 287 }; 288 289 //------------------------------LoadBNode-------------------------------------- 290 // Load a byte (8bits signed) from memory 291 class LoadBNode : public LoadNode { 292 public: 293 LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 294 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 295 virtual int Opcode() const; 296 virtual uint ideal_reg() const { return Op_RegI; } 297 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 298 virtual const Type* Value(PhaseGVN* phase) const; 299 virtual int store_Opcode() const { return Op_StoreB; } 300 virtual BasicType memory_type() const { return T_BYTE; } 301 }; 302 303 //------------------------------LoadUBNode------------------------------------- 304 // Load a unsigned byte (8bits unsigned) from memory 305 class LoadUBNode : public LoadNode { 306 public: 307 LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 308 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 309 virtual int Opcode() const; 310 virtual uint ideal_reg() const { return Op_RegI; } 311 virtual Node* Ideal(PhaseGVN *phase, bool can_reshape); 312 virtual const Type* Value(PhaseGVN* phase) const; 313 virtual int store_Opcode() const { return Op_StoreB; } 314 virtual BasicType memory_type() const { return T_BYTE; } 315 }; 316 317 //------------------------------LoadUSNode------------------------------------- 318 // Load an unsigned short/char (16bits unsigned) from memory 319 class LoadUSNode : public LoadNode { 320 public: 321 LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 322 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 323 virtual int Opcode() const; 324 virtual uint ideal_reg() const { return Op_RegI; } 325 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 326 virtual const Type* Value(PhaseGVN* phase) const; 327 virtual int store_Opcode() const { return Op_StoreC; } 328 virtual BasicType memory_type() const { return T_CHAR; } 329 }; 330 331 //------------------------------LoadSNode-------------------------------------- 332 // Load a short (16bits signed) from memory 333 class LoadSNode : public LoadNode { 334 public: 335 LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 336 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 337 virtual int Opcode() const; 338 virtual uint ideal_reg() const { return Op_RegI; } 339 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 340 virtual const Type* Value(PhaseGVN* phase) const; 341 virtual int store_Opcode() const { return Op_StoreC; } 342 virtual BasicType memory_type() const { return T_SHORT; } 343 }; 344 345 //------------------------------LoadINode-------------------------------------- 346 // Load an integer from memory 347 class LoadINode : public LoadNode { 348 public: 349 LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 350 : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} 351 virtual int Opcode() const; 352 virtual uint ideal_reg() const { return Op_RegI; } 353 virtual int store_Opcode() const { return Op_StoreI; } 354 virtual BasicType memory_type() const { return T_INT; } 355 }; 356 357 //------------------------------LoadRangeNode---------------------------------- 358 // Load an array length from the array 359 class LoadRangeNode : public LoadINode { 360 public: 361 LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS) 362 : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {} 363 virtual int Opcode() const; 364 virtual const Type* Value(PhaseGVN* phase) const; 365 virtual Node* Identity(PhaseGVN* phase); 366 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 367 }; 368 369 //------------------------------LoadLNode-------------------------------------- 370 // Load a long from memory 371 class LoadLNode : public LoadNode { 372 virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } 373 virtual uint cmp( const Node &n ) const { 374 return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access 375 && LoadNode::cmp(n); 376 } 377 virtual uint size_of() const { return sizeof(*this); } 378 const bool _require_atomic_access; // is piecewise load forbidden? 379 380 public: 381 LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl, 382 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false) 383 : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {} 384 virtual int Opcode() const; 385 virtual uint ideal_reg() const { return Op_RegL; } 386 virtual int store_Opcode() const { return Op_StoreL; } 387 virtual BasicType memory_type() const { return T_LONG; } 388 bool require_atomic_access() const { return _require_atomic_access; } 389 static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, 390 const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, 391 bool unaligned = false, bool mismatched = false); 392 #ifndef PRODUCT 393 virtual void dump_spec(outputStream *st) const { 394 LoadNode::dump_spec(st); 395 if (_require_atomic_access) st->print(" Atomic!"); 396 } 397 #endif 398 }; 399 400 //------------------------------LoadL_unalignedNode---------------------------- 401 // Load a long from unaligned memory 402 class LoadL_unalignedNode : public LoadLNode { 403 public: 404 LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 405 : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {} 406 virtual int Opcode() const; 407 }; 408 409 //------------------------------LoadFNode-------------------------------------- 410 // Load a float (64 bits) from memory 411 class LoadFNode : public LoadNode { 412 public: 413 LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 414 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} 415 virtual int Opcode() const; 416 virtual uint ideal_reg() const { return Op_RegF; } 417 virtual int store_Opcode() const { return Op_StoreF; } 418 virtual BasicType memory_type() const { return T_FLOAT; } 419 }; 420 421 //------------------------------LoadDNode-------------------------------------- 422 // Load a double (64 bits) from memory 423 class LoadDNode : public LoadNode { 424 virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } 425 virtual uint cmp( const Node &n ) const { 426 return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access 427 && LoadNode::cmp(n); 428 } 429 virtual uint size_of() const { return sizeof(*this); } 430 const bool _require_atomic_access; // is piecewise load forbidden? 431 432 public: 433 LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, 434 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false) 435 : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {} 436 virtual int Opcode() const; 437 virtual uint ideal_reg() const { return Op_RegD; } 438 virtual int store_Opcode() const { return Op_StoreD; } 439 virtual BasicType memory_type() const { return T_DOUBLE; } 440 bool require_atomic_access() const { return _require_atomic_access; } 441 static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, 442 const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, 443 bool unaligned = false, bool mismatched = false); 444 #ifndef PRODUCT 445 virtual void dump_spec(outputStream *st) const { 446 LoadNode::dump_spec(st); 447 if (_require_atomic_access) st->print(" Atomic!"); 448 } 449 #endif 450 }; 451 452 //------------------------------LoadD_unalignedNode---------------------------- 453 // Load a double from unaligned memory 454 class LoadD_unalignedNode : public LoadDNode { 455 public: 456 LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 457 : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {} 458 virtual int Opcode() const; 459 }; 460 461 //------------------------------LoadPNode-------------------------------------- 462 // Load a pointer from memory (either object or array) 463 class LoadPNode : public LoadNode { 464 public: 465 LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 466 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} 467 virtual int Opcode() const; 468 virtual uint ideal_reg() const { return Op_RegP; } 469 virtual int store_Opcode() const { return Op_StoreP; } 470 virtual BasicType memory_type() const { return T_ADDRESS; } 471 }; 472 473 474 //------------------------------LoadNNode-------------------------------------- 475 // Load a narrow oop from memory (either object or array) 476 class LoadNNode : public LoadNode { 477 public: 478 LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) 479 : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} 480 virtual int Opcode() const; 481 virtual uint ideal_reg() const { return Op_RegN; } 482 virtual int store_Opcode() const { return Op_StoreN; } 483 virtual BasicType memory_type() const { return T_NARROWOOP; } 484 }; 485 486 //------------------------------LoadKlassNode---------------------------------- 487 // Load a Klass from an object 488 class LoadKlassNode : public LoadPNode { 489 protected: 490 // In most cases, LoadKlassNode does not have the control input set. If the control 491 // input is set, it must not be removed (by LoadNode::Ideal()). 492 virtual bool can_remove_control() const; 493 public: 494 LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo) 495 : LoadPNode(c, mem, adr, at, tk, mo) {} 496 virtual int Opcode() const; 497 virtual const Type* Value(PhaseGVN* phase) const; 498 virtual Node* Identity(PhaseGVN* phase); 499 virtual bool depends_only_on_test() const { return true; } 500 501 // Polymorphic factory method: 502 static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, 503 const TypeKlassPtr* tk = TypeKlassPtr::OBJECT); 504 }; 505 506 //------------------------------LoadNKlassNode--------------------------------- 507 // Load a narrow Klass from an object. 508 class LoadNKlassNode : public LoadNNode { 509 public: 510 LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo) 511 : LoadNNode(c, mem, adr, at, tk, mo) {} 512 virtual int Opcode() const; 513 virtual uint ideal_reg() const { return Op_RegN; } 514 virtual int store_Opcode() const { return Op_StoreNKlass; } 515 virtual BasicType memory_type() const { return T_NARROWKLASS; } 516 517 virtual const Type* Value(PhaseGVN* phase) const; 518 virtual Node* Identity(PhaseGVN* phase); 519 virtual bool depends_only_on_test() const { return true; } 520 }; 521 522 523 //------------------------------StoreNode-------------------------------------- 524 // Store value; requires Store, Address and Value 525 class StoreNode : public MemNode { 526 private: 527 // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish 528 // stores that can be reordered, and such requiring release semantics to 529 // adhere to the Java specification. The required behaviour is stored in 530 // this field. 531 const MemOrd _mo; 532 // Needed for proper cloning. 533 virtual uint size_of() const { return sizeof(*this); } 534 protected: 535 virtual uint cmp( const Node &n ) const; 536 virtual bool depends_only_on_test() const { return false; } 537 538 Node *Ideal_masked_input (PhaseGVN *phase, uint mask); 539 Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits); 540 541 public: 542 // We must ensure that stores of object references will be visible 543 // only after the object's initialization. So the callers of this 544 // procedure must indicate that the store requires `release' 545 // semantics, if the stored value is an object reference that might 546 // point to a new object and may become externally visible. 547 StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 548 : MemNode(c, mem, adr, at, val), _mo(mo) { 549 init_class_id(Class_Store); 550 } 551 StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo) 552 : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) { 553 init_class_id(Class_Store); 554 } 555 556 inline bool is_unordered() const { return !is_release(); } 557 inline bool is_release() const { 558 assert((_mo == unordered || _mo == release), "unexpected"); 559 return _mo == release; 560 } 561 562 // Conservatively release stores of object references in order to 563 // ensure visibility of object initialization. 564 static inline MemOrd release_if_reference(const BasicType t) { 565 #ifdef AARCH64 566 // AArch64 doesn't need a release store here because object 567 // initialization contains the necessary barriers. 568 return unordered; 569 #else 570 const MemOrd mo = (t == T_ARRAY || 571 t == T_ADDRESS || // Might be the address of an object reference (`boxing'). 572 t == T_OBJECT) ? release : unordered; 573 return mo; 574 #endif 575 } 576 577 // Polymorphic factory method 578 // 579 // We must ensure that stores of object references will be visible 580 // only after the object's initialization. So the callers of this 581 // procedure must indicate that the store requires `release' 582 // semantics, if the stored value is an object reference that might 583 // point to a new object and may become externally visible. 584 static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr, 585 const TypePtr* at, Node *val, BasicType bt, MemOrd mo); 586 587 virtual uint hash() const; // Check the type 588 589 // If the store is to Field memory and the pointer is non-null, we can 590 // zero out the control input. 591 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 592 593 // Compute a new Type for this node. Basically we just do the pre-check, 594 // then call the virtual add() to set the type. 595 virtual const Type* Value(PhaseGVN* phase) const; 596 597 // Check for identity function on memory (Load then Store at same address) 598 virtual Node* Identity(PhaseGVN* phase); 599 600 // Do not match memory edge 601 virtual uint match_edge(uint idx) const; 602 603 virtual const Type *bottom_type() const; // returns Type::MEMORY 604 605 // Map a store opcode to its corresponding own opcode, trivially. 606 virtual int store_Opcode() const { return Opcode(); } 607 608 // have all possible loads of the value stored been optimized away? 609 bool value_never_loaded(PhaseTransform *phase) const; 610 }; 611 612 //------------------------------StoreBNode------------------------------------- 613 // Store byte to memory 614 class StoreBNode : public StoreNode { 615 public: 616 StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 617 : StoreNode(c, mem, adr, at, val, mo) {} 618 virtual int Opcode() const; 619 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 620 virtual BasicType memory_type() const { return T_BYTE; } 621 }; 622 623 //------------------------------StoreCNode------------------------------------- 624 // Store char/short to memory 625 class StoreCNode : public StoreNode { 626 public: 627 StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 628 : StoreNode(c, mem, adr, at, val, mo) {} 629 virtual int Opcode() const; 630 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 631 virtual BasicType memory_type() const { return T_CHAR; } 632 }; 633 634 //------------------------------StoreINode------------------------------------- 635 // Store int to memory 636 class StoreINode : public StoreNode { 637 public: 638 StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 639 : StoreNode(c, mem, adr, at, val, mo) {} 640 virtual int Opcode() const; 641 virtual BasicType memory_type() const { return T_INT; } 642 }; 643 644 //------------------------------StoreLNode------------------------------------- 645 // Store long to memory 646 class StoreLNode : public StoreNode { 647 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; } 648 virtual uint cmp( const Node &n ) const { 649 return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access 650 && StoreNode::cmp(n); 651 } 652 virtual uint size_of() const { return sizeof(*this); } 653 const bool _require_atomic_access; // is piecewise store forbidden? 654 655 public: 656 StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false) 657 : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {} 658 virtual int Opcode() const; 659 virtual BasicType memory_type() const { return T_LONG; } 660 bool require_atomic_access() const { return _require_atomic_access; } 661 static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo); 662 #ifndef PRODUCT 663 virtual void dump_spec(outputStream *st) const { 664 StoreNode::dump_spec(st); 665 if (_require_atomic_access) st->print(" Atomic!"); 666 } 667 #endif 668 }; 669 670 //------------------------------StoreFNode------------------------------------- 671 // Store float to memory 672 class StoreFNode : public StoreNode { 673 public: 674 StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 675 : StoreNode(c, mem, adr, at, val, mo) {} 676 virtual int Opcode() const; 677 virtual BasicType memory_type() const { return T_FLOAT; } 678 }; 679 680 //------------------------------StoreDNode------------------------------------- 681 // Store double to memory 682 class StoreDNode : public StoreNode { 683 virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; } 684 virtual uint cmp( const Node &n ) const { 685 return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access 686 && StoreNode::cmp(n); 687 } 688 virtual uint size_of() const { return sizeof(*this); } 689 const bool _require_atomic_access; // is piecewise store forbidden? 690 public: 691 StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, 692 MemOrd mo, bool require_atomic_access = false) 693 : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {} 694 virtual int Opcode() const; 695 virtual BasicType memory_type() const { return T_DOUBLE; } 696 bool require_atomic_access() const { return _require_atomic_access; } 697 static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo); 698 #ifndef PRODUCT 699 virtual void dump_spec(outputStream *st) const { 700 StoreNode::dump_spec(st); 701 if (_require_atomic_access) st->print(" Atomic!"); 702 } 703 #endif 704 705 }; 706 707 //------------------------------StorePNode------------------------------------- 708 // Store pointer to memory 709 class StorePNode : public StoreNode { 710 public: 711 StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 712 : StoreNode(c, mem, adr, at, val, mo) {} 713 virtual int Opcode() const; 714 virtual BasicType memory_type() const { return T_ADDRESS; } 715 }; 716 717 //------------------------------StoreNNode------------------------------------- 718 // Store narrow oop to memory 719 class StoreNNode : public StoreNode { 720 public: 721 StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 722 : StoreNode(c, mem, adr, at, val, mo) {} 723 virtual int Opcode() const; 724 virtual BasicType memory_type() const { return T_NARROWOOP; } 725 }; 726 727 //------------------------------StoreNKlassNode-------------------------------------- 728 // Store narrow klass to memory 729 class StoreNKlassNode : public StoreNNode { 730 public: 731 StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) 732 : StoreNNode(c, mem, adr, at, val, mo) {} 733 virtual int Opcode() const; 734 virtual BasicType memory_type() const { return T_NARROWKLASS; } 735 }; 736 737 //------------------------------StoreCMNode----------------------------------- 738 // Store card-mark byte to memory for CM 739 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store 740 // Preceeding equivalent StoreCMs may be eliminated. 741 class StoreCMNode : public StoreNode { 742 private: 743 virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; } 744 virtual uint cmp( const Node &n ) const { 745 return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx 746 && StoreNode::cmp(n); 747 } 748 virtual uint size_of() const { return sizeof(*this); } 749 int _oop_alias_idx; // The alias_idx of OopStore 750 751 public: 752 StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) : 753 StoreNode(c, mem, adr, at, val, oop_store, MemNode::release), 754 _oop_alias_idx(oop_alias_idx) { 755 assert(_oop_alias_idx >= Compile::AliasIdxRaw || 756 _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0, 757 "bad oop alias idx"); 758 } 759 virtual int Opcode() const; 760 virtual Node* Identity(PhaseGVN* phase); 761 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 762 virtual const Type* Value(PhaseGVN* phase) const; 763 virtual BasicType memory_type() const { return T_VOID; } // unspecific 764 int oop_alias_idx() const { return _oop_alias_idx; } 765 }; 766 767 //------------------------------LoadPLockedNode--------------------------------- 768 // Load-locked a pointer from memory (either object or array). 769 // On Sparc & Intel this is implemented as a normal pointer load. 770 // On PowerPC and friends it's a real load-locked. 771 class LoadPLockedNode : public LoadPNode { 772 public: 773 LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo) 774 : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {} 775 virtual int Opcode() const; 776 virtual int store_Opcode() const { return Op_StorePConditional; } 777 virtual bool depends_only_on_test() const { return true; } 778 }; 779 780 //------------------------------SCMemProjNode--------------------------------------- 781 // This class defines a projection of the memory state of a store conditional node. 782 // These nodes return a value, but also update memory. 783 class SCMemProjNode : public ProjNode { 784 public: 785 enum {SCMEMPROJCON = (uint)-2}; 786 SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { } 787 virtual int Opcode() const; 788 virtual bool is_CFG() const { return false; } 789 virtual const Type *bottom_type() const {return Type::MEMORY;} 790 virtual const TypePtr *adr_type() const { 791 Node* ctrl = in(0); 792 if (ctrl == NULL) return NULL; // node is dead 793 return ctrl->in(MemNode::Memory)->adr_type(); 794 } 795 virtual uint ideal_reg() const { return 0;} // memory projections don't have a register 796 virtual const Type* Value(PhaseGVN* phase) const; 797 #ifndef PRODUCT 798 virtual void dump_spec(outputStream *st) const {}; 799 #endif 800 }; 801 802 //------------------------------LoadStoreNode--------------------------- 803 // Note: is_Mem() method returns 'true' for this class. 804 class LoadStoreNode : public Node { 805 private: 806 const Type* const _type; // What kind of value is loaded? 807 const TypePtr* _adr_type; // What kind of memory is being addressed? 808 virtual uint size_of() const; // Size is bigger 809 public: 810 LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required ); 811 virtual bool depends_only_on_test() const { return false; } 812 virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; } 813 814 virtual const Type *bottom_type() const { return _type; } 815 virtual uint ideal_reg() const; 816 virtual const class TypePtr *adr_type() const { return _adr_type; } // returns bottom_type of address 817 818 bool result_not_used() const; 819 }; 820 821 class LoadStoreConditionalNode : public LoadStoreNode { 822 public: 823 enum { 824 ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode 825 }; 826 LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex); 827 }; 828 829 //------------------------------StorePConditionalNode--------------------------- 830 // Conditionally store pointer to memory, if no change since prior 831 // load-locked. Sets flags for success or failure of the store. 832 class StorePConditionalNode : public LoadStoreConditionalNode { 833 public: 834 StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { } 835 virtual int Opcode() const; 836 // Produces flags 837 virtual uint ideal_reg() const { return Op_RegFlags; } 838 }; 839 840 //------------------------------StoreIConditionalNode--------------------------- 841 // Conditionally store int to memory, if no change since prior 842 // load-locked. Sets flags for success or failure of the store. 843 class StoreIConditionalNode : public LoadStoreConditionalNode { 844 public: 845 StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { } 846 virtual int Opcode() const; 847 // Produces flags 848 virtual uint ideal_reg() const { return Op_RegFlags; } 849 }; 850 851 //------------------------------StoreLConditionalNode--------------------------- 852 // Conditionally store long to memory, if no change since prior 853 // load-locked. Sets flags for success or failure of the store. 854 class StoreLConditionalNode : public LoadStoreConditionalNode { 855 public: 856 StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { } 857 virtual int Opcode() const; 858 // Produces flags 859 virtual uint ideal_reg() const { return Op_RegFlags; } 860 }; 861 862 class CompareAndSwapNode : public LoadStoreConditionalNode { 863 private: 864 const MemNode::MemOrd _mem_ord; 865 public: 866 CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {} 867 MemNode::MemOrd order() const { 868 return _mem_ord; 869 } 870 }; 871 872 class CompareAndExchangeNode : public LoadStoreNode { 873 private: 874 const MemNode::MemOrd _mem_ord; 875 public: 876 enum { 877 ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode 878 }; 879 CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) : 880 LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) { 881 init_req(ExpectedIn, ex ); 882 } 883 884 MemNode::MemOrd order() const { 885 return _mem_ord; 886 } 887 }; 888 889 //------------------------------CompareAndSwapLNode--------------------------- 890 class CompareAndSwapLNode : public CompareAndSwapNode { 891 public: 892 CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 893 virtual int Opcode() const; 894 }; 895 896 897 //------------------------------CompareAndSwapINode--------------------------- 898 class CompareAndSwapINode : public CompareAndSwapNode { 899 public: 900 CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 901 virtual int Opcode() const; 902 }; 903 904 905 //------------------------------CompareAndSwapPNode--------------------------- 906 class CompareAndSwapPNode : public CompareAndSwapNode { 907 public: 908 CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 909 virtual int Opcode() const; 910 }; 911 912 //------------------------------CompareAndSwapNNode--------------------------- 913 class CompareAndSwapNNode : public CompareAndSwapNode { 914 public: 915 CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 916 virtual int Opcode() const; 917 }; 918 919 920 //------------------------------WeakCompareAndSwapLNode--------------------------- 921 class WeakCompareAndSwapLNode : public CompareAndSwapNode { 922 public: 923 WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 924 virtual int Opcode() const; 925 }; 926 927 928 //------------------------------WeakCompareAndSwapINode--------------------------- 929 class WeakCompareAndSwapINode : public CompareAndSwapNode { 930 public: 931 WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 932 virtual int Opcode() const; 933 }; 934 935 936 //------------------------------WeakCompareAndSwapPNode--------------------------- 937 class WeakCompareAndSwapPNode : public CompareAndSwapNode { 938 public: 939 WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 940 virtual int Opcode() const; 941 }; 942 943 //------------------------------WeakCompareAndSwapNNode--------------------------- 944 class WeakCompareAndSwapNNode : public CompareAndSwapNode { 945 public: 946 WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { } 947 virtual int Opcode() const; 948 }; 949 950 //------------------------------CompareAndExchangeLNode--------------------------- 951 class CompareAndExchangeLNode : public CompareAndExchangeNode { 952 public: 953 CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { } 954 virtual int Opcode() const; 955 }; 956 957 958 //------------------------------CompareAndExchangeINode--------------------------- 959 class CompareAndExchangeINode : public CompareAndExchangeNode { 960 public: 961 CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { } 962 virtual int Opcode() const; 963 }; 964 965 966 //------------------------------CompareAndExchangePNode--------------------------- 967 class CompareAndExchangePNode : public CompareAndExchangeNode { 968 public: 969 CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { } 970 virtual int Opcode() const; 971 }; 972 973 //------------------------------CompareAndExchangeNNode--------------------------- 974 class CompareAndExchangeNNode : public CompareAndExchangeNode { 975 public: 976 CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { } 977 virtual int Opcode() const; 978 }; 979 980 //------------------------------GetAndAddINode--------------------------- 981 class GetAndAddINode : public LoadStoreNode { 982 public: 983 GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { } 984 virtual int Opcode() const; 985 }; 986 987 //------------------------------GetAndAddLNode--------------------------- 988 class GetAndAddLNode : public LoadStoreNode { 989 public: 990 GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { } 991 virtual int Opcode() const; 992 }; 993 994 995 //------------------------------GetAndSetINode--------------------------- 996 class GetAndSetINode : public LoadStoreNode { 997 public: 998 GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { } 999 virtual int Opcode() const; 1000 }; 1001 1002 //------------------------------GetAndSetINode--------------------------- 1003 class GetAndSetLNode : public LoadStoreNode { 1004 public: 1005 GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { } 1006 virtual int Opcode() const; 1007 }; 1008 1009 //------------------------------GetAndSetPNode--------------------------- 1010 class GetAndSetPNode : public LoadStoreNode { 1011 public: 1012 GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { } 1013 virtual int Opcode() const; 1014 }; 1015 1016 //------------------------------GetAndSetNNode--------------------------- 1017 class GetAndSetNNode : public LoadStoreNode { 1018 public: 1019 GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { } 1020 virtual int Opcode() const; 1021 }; 1022 1023 //------------------------------ClearArray------------------------------------- 1024 class ClearArrayNode: public Node { 1025 private: 1026 bool _is_large; 1027 public: 1028 ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large) 1029 : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) { 1030 init_class_id(Class_ClearArray); 1031 } 1032 virtual int Opcode() const; 1033 virtual const Type *bottom_type() const { return Type::MEMORY; } 1034 // ClearArray modifies array elements, and so affects only the 1035 // array memory addressed by the bottom_type of its base address. 1036 virtual const class TypePtr *adr_type() const; 1037 virtual Node* Identity(PhaseGVN* phase); 1038 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 1039 virtual uint match_edge(uint idx) const; 1040 bool is_large() const { return _is_large; } 1041 1042 // Clear the given area of an object or array. 1043 // The start offset must always be aligned mod BytesPerInt. 1044 // The end offset must always be aligned mod BytesPerLong. 1045 // Return the new memory. 1046 static Node* clear_memory(Node* control, Node* mem, Node* dest, 1047 intptr_t start_offset, 1048 intptr_t end_offset, 1049 PhaseGVN* phase); 1050 static Node* clear_memory(Node* control, Node* mem, Node* dest, 1051 intptr_t start_offset, 1052 Node* end_offset, 1053 PhaseGVN* phase); 1054 static Node* clear_memory(Node* control, Node* mem, Node* dest, 1055 Node* start_offset, 1056 Node* end_offset, 1057 PhaseGVN* phase); 1058 // Return allocation input memory edge if it is different instance 1059 // or itself if it is the one we are looking for. 1060 static bool step_through(Node** np, uint instance_id, PhaseTransform* phase); 1061 }; 1062 1063 //------------------------------MemBar----------------------------------------- 1064 // There are different flavors of Memory Barriers to match the Java Memory 1065 // Model. Monitor-enter and volatile-load act as Aquires: no following ref 1066 // can be moved to before them. We insert a MemBar-Acquire after a FastLock or 1067 // volatile-load. Monitor-exit and volatile-store act as Release: no 1068 // preceding ref can be moved to after them. We insert a MemBar-Release 1069 // before a FastUnlock or volatile-store. All volatiles need to be 1070 // serialized, so we follow all volatile-stores with a MemBar-Volatile to 1071 // separate it from any following volatile-load. 1072 class MemBarNode: public MultiNode { 1073 virtual uint hash() const ; // { return NO_HASH; } 1074 virtual uint cmp( const Node &n ) const ; // Always fail, except on self 1075 1076 virtual uint size_of() const { return sizeof(*this); } 1077 // Memory type this node is serializing. Usually either rawptr or bottom. 1078 const TypePtr* _adr_type; 1079 1080 public: 1081 enum { 1082 Precedent = TypeFunc::Parms // optional edge to force precedence 1083 }; 1084 MemBarNode(Compile* C, int alias_idx, Node* precedent); 1085 virtual int Opcode() const = 0; 1086 virtual const class TypePtr *adr_type() const { return _adr_type; } 1087 virtual const Type* Value(PhaseGVN* phase) const; 1088 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 1089 virtual uint match_edge(uint idx) const { return 0; } 1090 virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; } 1091 virtual Node *match( const ProjNode *proj, const Matcher *m ); 1092 // Factory method. Builds a wide or narrow membar. 1093 // Optional 'precedent' becomes an extra edge if not null. 1094 static MemBarNode* make(Compile* C, int opcode, 1095 int alias_idx = Compile::AliasIdxBot, 1096 Node* precedent = NULL); 1097 }; 1098 1099 // "Acquire" - no following ref can move before (but earlier refs can 1100 // follow, like an early Load stalled in cache). Requires multi-cpu 1101 // visibility. Inserted after a volatile load. 1102 class MemBarAcquireNode: public MemBarNode { 1103 public: 1104 MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent) 1105 : MemBarNode(C, alias_idx, precedent) {} 1106 virtual int Opcode() const; 1107 }; 1108 1109 // "Acquire" - no following ref can move before (but earlier refs can 1110 // follow, like an early Load stalled in cache). Requires multi-cpu 1111 // visibility. Inserted independ of any load, as required 1112 // for intrinsic Unsafe.loadFence(). 1113 class LoadFenceNode: public MemBarNode { 1114 public: 1115 LoadFenceNode(Compile* C, int alias_idx, Node* precedent) 1116 : MemBarNode(C, alias_idx, precedent) {} 1117 virtual int Opcode() const; 1118 }; 1119 1120 // "Release" - no earlier ref can move after (but later refs can move 1121 // up, like a speculative pipelined cache-hitting Load). Requires 1122 // multi-cpu visibility. Inserted before a volatile store. 1123 class MemBarReleaseNode: public MemBarNode { 1124 public: 1125 MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent) 1126 : MemBarNode(C, alias_idx, precedent) {} 1127 virtual int Opcode() const; 1128 }; 1129 1130 // "Release" - no earlier ref can move after (but later refs can move 1131 // up, like a speculative pipelined cache-hitting Load). Requires 1132 // multi-cpu visibility. Inserted independent of any store, as required 1133 // for intrinsic Unsafe.storeFence(). 1134 class StoreFenceNode: public MemBarNode { 1135 public: 1136 StoreFenceNode(Compile* C, int alias_idx, Node* precedent) 1137 : MemBarNode(C, alias_idx, precedent) {} 1138 virtual int Opcode() const; 1139 }; 1140 1141 // "Acquire" - no following ref can move before (but earlier refs can 1142 // follow, like an early Load stalled in cache). Requires multi-cpu 1143 // visibility. Inserted after a FastLock. 1144 class MemBarAcquireLockNode: public MemBarNode { 1145 public: 1146 MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent) 1147 : MemBarNode(C, alias_idx, precedent) {} 1148 virtual int Opcode() const; 1149 }; 1150 1151 // "Release" - no earlier ref can move after (but later refs can move 1152 // up, like a speculative pipelined cache-hitting Load). Requires 1153 // multi-cpu visibility. Inserted before a FastUnLock. 1154 class MemBarReleaseLockNode: public MemBarNode { 1155 public: 1156 MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent) 1157 : MemBarNode(C, alias_idx, precedent) {} 1158 virtual int Opcode() const; 1159 }; 1160 1161 class MemBarStoreStoreNode: public MemBarNode { 1162 public: 1163 MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent) 1164 : MemBarNode(C, alias_idx, precedent) { 1165 init_class_id(Class_MemBarStoreStore); 1166 } 1167 virtual int Opcode() const; 1168 }; 1169 1170 // Ordering between a volatile store and a following volatile load. 1171 // Requires multi-CPU visibility? 1172 class MemBarVolatileNode: public MemBarNode { 1173 public: 1174 MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent) 1175 : MemBarNode(C, alias_idx, precedent) {} 1176 virtual int Opcode() const; 1177 }; 1178 1179 // Ordering within the same CPU. Used to order unsafe memory references 1180 // inside the compiler when we lack alias info. Not needed "outside" the 1181 // compiler because the CPU does all the ordering for us. 1182 class MemBarCPUOrderNode: public MemBarNode { 1183 public: 1184 MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent) 1185 : MemBarNode(C, alias_idx, precedent) {} 1186 virtual int Opcode() const; 1187 virtual uint ideal_reg() const { return 0; } // not matched in the AD file 1188 }; 1189 1190 // Isolation of object setup after an AllocateNode and before next safepoint. 1191 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.) 1192 class InitializeNode: public MemBarNode { 1193 friend class AllocateNode; 1194 1195 enum { 1196 Incomplete = 0, 1197 Complete = 1, 1198 WithArraycopy = 2 1199 }; 1200 int _is_complete; 1201 1202 bool _does_not_escape; 1203 1204 public: 1205 enum { 1206 Control = TypeFunc::Control, 1207 Memory = TypeFunc::Memory, // MergeMem for states affected by this op 1208 RawAddress = TypeFunc::Parms+0, // the newly-allocated raw address 1209 RawStores = TypeFunc::Parms+1 // zero or more stores (or TOP) 1210 }; 1211 1212 InitializeNode(Compile* C, int adr_type, Node* rawoop); 1213 virtual int Opcode() const; 1214 virtual uint size_of() const { return sizeof(*this); } 1215 virtual uint ideal_reg() const { return 0; } // not matched in the AD file 1216 virtual const RegMask &in_RegMask(uint) const; // mask for RawAddress 1217 1218 // Manage incoming memory edges via a MergeMem on in(Memory): 1219 Node* memory(uint alias_idx); 1220 1221 // The raw memory edge coming directly from the Allocation. 1222 // The contents of this memory are *always* all-zero-bits. 1223 Node* zero_memory() { return memory(Compile::AliasIdxRaw); } 1224 1225 // Return the corresponding allocation for this initialization (or null if none). 1226 // (Note: Both InitializeNode::allocation and AllocateNode::initialization 1227 // are defined in graphKit.cpp, which sets up the bidirectional relation.) 1228 AllocateNode* allocation(); 1229 1230 // Anything other than zeroing in this init? 1231 bool is_non_zero(); 1232 1233 // An InitializeNode must completed before macro expansion is done. 1234 // Completion requires that the AllocateNode must be followed by 1235 // initialization of the new memory to zero, then to any initializers. 1236 bool is_complete() { return _is_complete != Incomplete; } 1237 bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; } 1238 1239 // Mark complete. (Must not yet be complete.) 1240 void set_complete(PhaseGVN* phase); 1241 void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; } 1242 1243 bool does_not_escape() { return _does_not_escape; } 1244 void set_does_not_escape() { _does_not_escape = true; } 1245 1246 #ifdef ASSERT 1247 // ensure all non-degenerate stores are ordered and non-overlapping 1248 bool stores_are_sane(PhaseTransform* phase); 1249 #endif //ASSERT 1250 1251 // See if this store can be captured; return offset where it initializes. 1252 // Return 0 if the store cannot be moved (any sort of problem). 1253 intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape); 1254 1255 // Capture another store; reformat it to write my internal raw memory. 1256 // Return the captured copy, else NULL if there is some sort of problem. 1257 Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape); 1258 1259 // Find captured store which corresponds to the range [start..start+size). 1260 // Return my own memory projection (meaning the initial zero bits) 1261 // if there is no such store. Return NULL if there is a problem. 1262 Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase); 1263 1264 // Called when the associated AllocateNode is expanded into CFG. 1265 Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr, 1266 intptr_t header_size, Node* size_in_bytes, 1267 PhaseGVN* phase); 1268 1269 private: 1270 void remove_extra_zeroes(); 1271 1272 // Find out where a captured store should be placed (or already is placed). 1273 int captured_store_insertion_point(intptr_t start, int size_in_bytes, 1274 PhaseTransform* phase); 1275 1276 static intptr_t get_store_offset(Node* st, PhaseTransform* phase); 1277 1278 Node* make_raw_address(intptr_t offset, PhaseTransform* phase); 1279 1280 bool detect_init_independence(Node* n, int& count); 1281 1282 void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes, 1283 PhaseGVN* phase); 1284 1285 intptr_t find_next_fullword_store(uint i, PhaseGVN* phase); 1286 }; 1287 1288 //------------------------------MergeMem--------------------------------------- 1289 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.) 1290 class MergeMemNode: public Node { 1291 virtual uint hash() const ; // { return NO_HASH; } 1292 virtual uint cmp( const Node &n ) const ; // Always fail, except on self 1293 friend class MergeMemStream; 1294 MergeMemNode(Node* def); // clients use MergeMemNode::make 1295 1296 public: 1297 // If the input is a whole memory state, clone it with all its slices intact. 1298 // Otherwise, make a new memory state with just that base memory input. 1299 // In either case, the result is a newly created MergeMem. 1300 static MergeMemNode* make(Node* base_memory); 1301 1302 virtual int Opcode() const; 1303 virtual Node* Identity(PhaseGVN* phase); 1304 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); 1305 virtual uint ideal_reg() const { return NotAMachineReg; } 1306 virtual uint match_edge(uint idx) const { return 0; } 1307 virtual const RegMask &out_RegMask() const; 1308 virtual const Type *bottom_type() const { return Type::MEMORY; } 1309 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; } 1310 // sparse accessors 1311 // Fetch the previously stored "set_memory_at", or else the base memory. 1312 // (Caller should clone it if it is a phi-nest.) 1313 Node* memory_at(uint alias_idx) const; 1314 // set the memory, regardless of its previous value 1315 void set_memory_at(uint alias_idx, Node* n); 1316 // the "base" is the memory that provides the non-finite support 1317 Node* base_memory() const { return in(Compile::AliasIdxBot); } 1318 // warning: setting the base can implicitly set any of the other slices too 1319 void set_base_memory(Node* def); 1320 // sentinel value which denotes a copy of the base memory: 1321 Node* empty_memory() const { return in(Compile::AliasIdxTop); } 1322 static Node* make_empty_memory(); // where the sentinel comes from 1323 bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); } 1324 // hook for the iterator, to perform any necessary setup 1325 void iteration_setup(const MergeMemNode* other = NULL); 1326 // push sentinels until I am at least as long as the other (semantic no-op) 1327 void grow_to_match(const MergeMemNode* other); 1328 bool verify_sparse() const PRODUCT_RETURN0; 1329 #ifndef PRODUCT 1330 virtual void dump_spec(outputStream *st) const; 1331 #endif 1332 }; 1333 1334 class MergeMemStream : public StackObj { 1335 private: 1336 MergeMemNode* _mm; 1337 const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations 1338 Node* _mm_base; // loop-invariant base memory of _mm 1339 int _idx; 1340 int _cnt; 1341 Node* _mem; 1342 Node* _mem2; 1343 int _cnt2; 1344 1345 void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) { 1346 // subsume_node will break sparseness at times, whenever a memory slice 1347 // folds down to a copy of the base ("fat") memory. In such a case, 1348 // the raw edge will update to base, although it should be top. 1349 // This iterator will recognize either top or base_memory as an 1350 // "empty" slice. See is_empty, is_empty2, and next below. 1351 // 1352 // The sparseness property is repaired in MergeMemNode::Ideal. 1353 // As long as access to a MergeMem goes through this iterator 1354 // or the memory_at accessor, flaws in the sparseness will 1355 // never be observed. 1356 // 1357 // Also, iteration_setup repairs sparseness. 1358 assert(mm->verify_sparse(), "please, no dups of base"); 1359 assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base"); 1360 1361 _mm = mm; 1362 _mm_base = mm->base_memory(); 1363 _mm2 = mm2; 1364 _cnt = mm->req(); 1365 _idx = Compile::AliasIdxBot-1; // start at the base memory 1366 _mem = NULL; 1367 _mem2 = NULL; 1368 } 1369 1370 #ifdef ASSERT 1371 Node* check_memory() const { 1372 if (at_base_memory()) 1373 return _mm->base_memory(); 1374 else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top()) 1375 return _mm->memory_at(_idx); 1376 else 1377 return _mm_base; 1378 } 1379 Node* check_memory2() const { 1380 return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx); 1381 } 1382 #endif 1383 1384 static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0; 1385 void assert_synch() const { 1386 assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx), 1387 "no side-effects except through the stream"); 1388 } 1389 1390 public: 1391 1392 // expected usages: 1393 // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... } 1394 // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... } 1395 1396 // iterate over one merge 1397 MergeMemStream(MergeMemNode* mm) { 1398 mm->iteration_setup(); 1399 init(mm); 1400 debug_only(_cnt2 = 999); 1401 } 1402 // iterate in parallel over two merges 1403 // only iterates through non-empty elements of mm2 1404 MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) { 1405 assert(mm2, "second argument must be a MergeMem also"); 1406 ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state 1407 mm->iteration_setup(mm2); 1408 init(mm, mm2); 1409 _cnt2 = mm2->req(); 1410 } 1411 #ifdef ASSERT 1412 ~MergeMemStream() { 1413 assert_synch(); 1414 } 1415 #endif 1416 1417 MergeMemNode* all_memory() const { 1418 return _mm; 1419 } 1420 Node* base_memory() const { 1421 assert(_mm_base == _mm->base_memory(), "no update to base memory, please"); 1422 return _mm_base; 1423 } 1424 const MergeMemNode* all_memory2() const { 1425 assert(_mm2 != NULL, ""); 1426 return _mm2; 1427 } 1428 bool at_base_memory() const { 1429 return _idx == Compile::AliasIdxBot; 1430 } 1431 int alias_idx() const { 1432 assert(_mem, "must call next 1st"); 1433 return _idx; 1434 } 1435 1436 const TypePtr* adr_type() const { 1437 return Compile::current()->get_adr_type(alias_idx()); 1438 } 1439 1440 const TypePtr* adr_type(Compile* C) const { 1441 return C->get_adr_type(alias_idx()); 1442 } 1443 bool is_empty() const { 1444 assert(_mem, "must call next 1st"); 1445 assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel"); 1446 return _mem->is_top(); 1447 } 1448 bool is_empty2() const { 1449 assert(_mem2, "must call next 1st"); 1450 assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel"); 1451 return _mem2->is_top(); 1452 } 1453 Node* memory() const { 1454 assert(!is_empty(), "must not be empty"); 1455 assert_synch(); 1456 return _mem; 1457 } 1458 // get the current memory, regardless of empty or non-empty status 1459 Node* force_memory() const { 1460 assert(!is_empty() || !at_base_memory(), ""); 1461 // Use _mm_base to defend against updates to _mem->base_memory(). 1462 Node *mem = _mem->is_top() ? _mm_base : _mem; 1463 assert(mem == check_memory(), ""); 1464 return mem; 1465 } 1466 Node* memory2() const { 1467 assert(_mem2 == check_memory2(), ""); 1468 return _mem2; 1469 } 1470 void set_memory(Node* mem) { 1471 if (at_base_memory()) { 1472 // Note that this does not change the invariant _mm_base. 1473 _mm->set_base_memory(mem); 1474 } else { 1475 _mm->set_memory_at(_idx, mem); 1476 } 1477 _mem = mem; 1478 assert_synch(); 1479 } 1480 1481 // Recover from a side effect to the MergeMemNode. 1482 void set_memory() { 1483 _mem = _mm->in(_idx); 1484 } 1485 1486 bool next() { return next(false); } 1487 bool next2() { return next(true); } 1488 1489 bool next_non_empty() { return next_non_empty(false); } 1490 bool next_non_empty2() { return next_non_empty(true); } 1491 // next_non_empty2 can yield states where is_empty() is true 1492 1493 private: 1494 // find the next item, which might be empty 1495 bool next(bool have_mm2) { 1496 assert((_mm2 != NULL) == have_mm2, "use other next"); 1497 assert_synch(); 1498 if (++_idx < _cnt) { 1499 // Note: This iterator allows _mm to be non-sparse. 1500 // It behaves the same whether _mem is top or base_memory. 1501 _mem = _mm->in(_idx); 1502 if (have_mm2) 1503 _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop); 1504 return true; 1505 } 1506 return false; 1507 } 1508 1509 // find the next non-empty item 1510 bool next_non_empty(bool have_mm2) { 1511 while (next(have_mm2)) { 1512 if (!is_empty()) { 1513 // make sure _mem2 is filled in sensibly 1514 if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory(); 1515 return true; 1516 } else if (have_mm2 && !is_empty2()) { 1517 return true; // is_empty() == true 1518 } 1519 } 1520 return false; 1521 } 1522 }; 1523 1524 //------------------------------Prefetch--------------------------------------- 1525 1526 // Allocation prefetch which may fault, TLAB size have to be adjusted. 1527 class PrefetchAllocationNode : public Node { 1528 public: 1529 PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {} 1530 virtual int Opcode() const; 1531 virtual uint ideal_reg() const { return NotAMachineReg; } 1532 virtual uint match_edge(uint idx) const { return idx==2; } 1533 virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; } 1534 }; 1535 1536 #endif // SHARE_VM_OPTO_MEMNODE_HPP