/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;  // Unaligned access from unsafe
  bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
  bool _unsafe_access;     // Access of unsafe origin.
protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,  // When is it safe to do this load?
         Memory,   // Chunk of memory is being loaded from
         Address,  // Actual address, derived from base
         ValueIn,  // Value to store
         OopStore  // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,  // Load has to acquire or be succeeded by MemBarAcquire.
                 release,  // Store has to release or be preceded by MemBarRelease.
                 seqcst,   // LoadStore has to have both acquire and release semantics.
                 unset     // The memory ordering is not set (used for testing)
  } MemOrd;
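
  // Example (illustrative sketch only; gvn, ctl, mem, adr and adr_type are
  // assumed to be set up by the caller): a volatile Java read is typically
  // built as a load with acquire ordering, a plain field read as unordered:
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeInt::INT, T_INT, MemNode::acquire);
  //
  // On strongly ordered platforms the acquire may cost nothing in the emitted
  // code; on weakly ordered platforms it constrains scheduling and matching.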
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; }
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return NULL;
#endif
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether the load depends only on the dominating test.
  // Methods used to build LoadNodes pass an argument of type enum
  // ControlDependency instead of a boolean because those methods
  // typically have multiple boolean parameters with default values:
  // passing the wrong boolean to one of these parameters by mistake
  // easily goes unnoticed.  Using an enum, the compiler can check that
  // the type of a value and the type of the parameter match.
  enum ControlDependency {
    Pinned,
    DependsOnlyOnTest
  };
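
  // Example (illustrative sketch only; gvn, ctl, mem, adr and adr_type are
  // assumed to be set up by the caller): a load built for an unsafe access
  // that must not float above its guarding checks is created pinned:
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT,
  //                             T_INT, MemNode::unordered, LoadNode::Pinned);
  //
  // A bare bool (say, one intended for the unaligned/mismatched flags) does
  // not implicitly convert to ControlDependency, so passing it in this
  // position fails to compile, which is the mistake the enum guards against.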
private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode;
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place.  If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // loads that can be reordered from those requiring acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool unaligned = false, bool mismatched = false, bool unsafe = false);

  virtual uint hash() const;    // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values.
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }
};
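
// Example (illustrative sketch only): the allocation fast path loads the TLS
// "top" pointer as a RawPtr load; a Safepoint may change "top", so such a
// load must answer depends_only_on_test() == false, which the override above
// guarantees for TypeRawPtr::BOTTOM addresses.  A pass that wants to hoist a
// load above its control is expected to check something like:
//
//   if (ld->depends_only_on_test()) {
//     // safe to let the load float up to the dominating test
//   }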

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false, bool unsafe = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
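
// Example (illustrative sketch only; ctl, mem, adr and adr_type assumed set
// up by the caller): on a platform where 64-bit loads are not naturally
// atomic, a volatile Java long read can be built through the atomic factory:
//
//   LoadLNode* ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type,
//                                          TypeLong::LONG, MemNode::acquire);
//
// Note that hash() and cmp() above fold _require_atomic_access into value
// numbering, so GVN never merges atomic and non-atomic variants.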

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false, bool unsafe = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};

// Retrieve the null-free property from an array klass. This is
// treated a bit like a field that would be read from the klass
// structure at runtime, except that the implementation encodes the
// property as a bit in the klass header field of the array. This
// implementation detail is hidden under this node so it doesn't make
// a difference for high-level optimizations. At final graph reshaping
// time, this node is turned into the actual logical operations that
// extract the property from the klass pointer. For this to work
// correctly, GetNullFreePropertyNode must take a LoadKlass/LoadNKlass
// input. The Ideal transformation splits the GetNullFreePropertyNode
// through phis, and Value returns a constant if the node's input is a
// constant. These two should guarantee that GetNullFreePropertyNode
// does indeed have a LoadKlass/LoadNKlass input at final graph
// reshaping time.
class GetNullFreePropertyNode : public Node {
public:
  GetNullFreePropertyNode(Node* klass) : Node(NULL, klass) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* bottom_type() const {
    if (in(1)->bottom_type()->isa_klassptr()) {
      return TypeLong::LONG;
    }
    return TypeInt::INT;
  }
};
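
// Example (illustrative sketch only; gvn, ctl, mem and k_adr assumed set up,
// with k_adr addressing the object's klass field): the node is expected to
// sit directly on a klass load,
//
//   Node* k  = LoadKlassNode::make(gvn, ctl, mem, k_adr, TypeInstPtr::KLASS);
//   Node* nf = gvn.transform(new GetNullFreePropertyNode(k));
//
// so that final graph reshaping can lower it to the bit extraction on the
// klass word.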

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // stores that can be reordered from those requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }
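
  // Example (illustrative sketch only; gvn, ctl, mem, adr, adr_type and val
  // assumed set up by the caller): a store that may publish a freshly
  // allocated object conservatively asks for release semantics through the
  // helper above:
  //
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val,
  //                                   T_OBJECT,
  //                                   StoreNode::release_if_reference(T_OBJECT));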

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store.
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual bool cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == NULL)  return NULL; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};
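
// Example (illustrative sketch only; gvn, ctl, mem, adr, oldval and newval
// assumed set up by the caller): a load/store node produces both a value and
// a new memory state; the memory side is exposed through an SCMemProjNode:
//
//   Node* cas  = gvn.transform(new CompareAndSwapPNode(ctl, mem, adr, newval,
//                                                      oldval, MemNode::seqcst));
//   Node* proj = gvn.transform(new SCMemProjNode(cas));  // new memory state
//
// Memory consumers then use proj, while cas itself feeds the success test.
// (CompareAndSwapPNode is declared further below.)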

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};
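
// Example (illustrative note only): the Weak* variants below may fail
// spuriously even when the expected value is present, so the caller is
// responsible for retrying; in exchange they can avoid an internal retry
// loop on LL/SC architectures.  Construction mirrors the strong form:
//
//   Node* cas = gvn.transform(new WeakCompareAndSwapINode(ctl, mem, adr,
//                                                         newval, oldval,
//                                                         MemNode::unordered));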

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
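
// Example (illustrative sketch only; gvn, ctl, mem, adr, delta and at assumed
// set up by the caller): an atomic getAndAddInt becomes a GetAndAddINode
// whose value result is the old contents and whose memory result again flows
// out through an SCMemProjNode:
//
//   Node* xadd = gvn.transform(new GetAndAddINode(ctl, mem, adr, delta, at));
//   Node* proj = gvn.transform(new SCMemProjNode(xadd));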

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
  bool _word_copy_only;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
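
// Example (illustrative sketch only; ctl, mem, dest, header_size and
// size_in_bytes assumed set up by the caller, and NULL val/raw_val assumed
// to request plain zeroing): clearing the body of a new object uses the
// helpers above, with offsets respecting the stated alignment:
//
//   mem = ClearArrayNode::clear_memory(ctl, mem, dest,
//                                      NULL, NULL,     // val, raw_val
//                                      header_size,    // mod BytesPerInt
//                                      size_in_bytes,  // mod BytesPerLong
//                                      &gvn);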

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility.
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
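// A hedged sketch (not code from this file) of how the pairing queries on
// MemBarNode above fit together: from a trailing barrier one can recover its
// leading partner and check that the two point at each other.  `mb' is a
// hypothetical barrier in hand.
//
//   if (mb->trailing_store() || mb->trailing_load_store()) {
//     MemBarNode* leading = mb->leading_membar();
//     assert(leading != NULL && leading->leading() &&
//            leading->trailing_membar() == mb, "pair must point at each other");
//   }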
// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
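// A hedged sketch (not code from this file) of the capture protocol declared
// above: a store that provably initializes fresh, still-unpublished memory
// can be folded into the InitializeNode.  `init', `st', `phase' and
// `can_reshape' are hypothetical locals.
//
//   intptr_t offset = init->can_capture_store(st, phase, can_reshape);
//   if (offset != 0) {
//     Node* moved = init->capture_store(st, offset, phase, can_reshape);
//     // `moved' (if not NULL) now writes the raw memory of the new object;
//     // the caller can then eliminate the original store `st'.
//   }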
//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;      // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
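// A hedged usage sketch (not code from this file): cloning a memory state,
// updating a single alias slice, and handing the merge back to GVN.
// `gvn', `mem', `alias_idx' and `new_st' are hypothetical locals.
//
//   MergeMemNode* mm = MergeMemNode::make(mem);   // clone or wrap `mem'
//   Node* old_slice = mm->memory_at(alias_idx);   // stored slice, else base
//   mm->set_memory_at(alias_idx, new_st);         // overwrite just that slice
//   Node* result = gvn.transform(mm);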
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};
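// A hedged elaboration (not code from this file) of the "expected usages"
// comment above: visiting every non-empty alias slice of a memory state.
// `mem' and `visit' are hypothetical.
//
//   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
//     visit(mms.alias_idx(), mms.memory());  // one call per non-empty slice
//   }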
//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP