/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;   // Unaligned access from unsafe
  bool _mismatched_access;  // Mismatched access from unsafe: byte read in integer array for instance
  bool _unsafe_access;      // Access of unsafe origin.
protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
                 release,       // Store has to release or be preceded by MemBarRelease.
                 seqcst,        // LoadStore has to have both acquire and release semantics.
                 unset          // The memory ordering is not set (used for testing)
  } MemOrd;
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; }
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};
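
// Illustrative sketch (not code from this file): find_previous_store() and
// can_see_stored_value() together enable store-to-load forwarding.  Given a
// dominating store to the same address with no intervening aliased store, a
// load's Identity() can return the stored value directly.  A hypothetical
// caller might look like:
//
//   Node* st = ld->find_previous_store(phase);        // e.g. StoreI(mem, adr, v)
//   if (st != NULL) {
//     Node* v = ld->can_see_stored_value(st, phase);  // v if visible, else NULL
//     if (v != NULL)  return v;                       // replace the load by v
//   }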

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether that node depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are allowed to do that.  Therefore, Pinned is stricter.
  enum ControlDependency {
    Pinned,
    UnknownControl,
    DependsOnlyOnTest
  };

private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode;
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place.  If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // loads that can be reordered from those requiring acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

  uint _barrier; // Bit field with barrier information

  AllocateNode* is_new_object_mark_load(PhaseGVN *phase) const;

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _barrier(0), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool unaligned = false, bool mismatched = false, bool unsafe = false);

  virtual uint hash() const;    // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

  void copy_barrier_info(const Node* src) { _barrier = src->as_Load()->_barrier; }
  uint barrier_data() { return _barrier; }
  void set_barrier_data(uint barrier_data) { _barrier |= barrier_data; }

  void pin() { _control_dependency = Pinned; }
  bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }
};
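
// Illustrative sketch (assumed caller context, not code from this file): a
// volatile int field read built through the polymorphic factory above uses
// acquire ordering and a pinned control dependency; 'gvn', 'ctl', 'mem',
// 'adr' and 'adr_type' are assumed to come from the caller:
//
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT,
//                             T_INT, MemNode::acquire, LoadNode::Pinned);
//   ld = gvn.transform(ld);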

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false, bool unsafe = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};
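
// Illustrative sketch (assumed caller context): on platforms where 64-bit
// loads are not atomic by default, a volatile long read is created through
// make_atomic so the matcher cannot split it into two 32-bit halves:
//
//   LoadLNode* ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type,
//                                          TypeLong::LONG, MemNode::acquire);
//
// LoadDNode below offers the same factory for doubles.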

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false, bool unsafe = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};
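
// Illustrative sketch (assumed caller context): with compressed oops, an oop
// field read is emitted as a LoadN of the 32-bit narrow oop followed by a
// DecodeN (declared in opto/narrowptrnode.hpp) back to a full-width pointer:
//
//   Node* narrow = gvn.transform(new LoadNNode(ctl, mem, adr, at,
//                                              narrow_oop_type, mo));
//   Node* oop    = gvn.transform(new DecodeNNode(narrow, oop_type));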

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};
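
// Illustrative sketch (mirrors the usual GraphKit call; 'basic_plus_adr' and
// 'immutable_memory' are assumed helpers from the caller): loading an
// object's klass pointer goes through the factory, which selects LoadKlass
// or LoadNKlass depending on whether class pointers are compressed:
//
//   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
//   Node* klass = gvn.transform(
//       LoadKlassNode::make(gvn, NULL, immutable_memory(), k_adr,
//                           TypeInstPtr::KLASS));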

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // stores that can be reordered from those requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;

  MemBarNode* trailing_membar() const;
};
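
// Illustrative sketch (assumed caller context): an oop field write picks its
// ordering with release_if_reference() so reference stores are conservatively
// released, then goes through the polymorphic factory:
//
//   MemNode::MemOrd mo = StoreNode::release_if_reference(T_OBJECT);
//   Node* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_OBJECT, mo);
//   st = gvn.transform(st);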

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual bool cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == NULL)  return NULL; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};
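
// Illustrative sketch (assumed caller context): a locked pointer update pairs
// a LoadPLocked with a StorePConditional (declared below), and the updated
// memory state is extracted through an SCMemProjNode:
//
//   Node* cur  = gvn.transform(new LoadPLockedNode(ctl, mem, adr,
//                                                  MemNode::acquire));
//   Node* scas = gvn.transform(new StorePConditionalNode(ctl, mem, adr,
//                                                        newval, cur));
//   Node* memp = gvn.transform(new SCMemProjNode(scas));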

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  bool _has_barrier;
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;
  void set_has_barrier() { _has_barrier = true; };
  bool has_barrier() const { return _has_barrier; };
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};
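
// The WeakCompareAndSwap* variants below correspond to the weak flavors of
// the Unsafe compare-and-set intrinsics: a weak CAS is allowed to fail
// spuriously (report failure even when the expected value was present),
// which permits cheaper instruction sequences on some platforms.  The strong
// CompareAndSwap* variants above may not fail spuriously.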

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};
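
// Illustrative sketch (assumed caller context): Unsafe.getAndAddInt is
// intrinsified into a GetAndAddINode; the node's value result is the old
// contents, and the updated memory state is projected out through an
// SCMemProjNode:
//
//   Node* oldval = gvn.transform(new GetAndAddINode(ctl, mem, adr, delta, at));
//   Node* memprj = gvn.transform(new SCMemProjNode(oldval));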

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
    : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
    init_class_id(Class_ClearArray);
  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return the allocation's input memory edge if it is a different instance,
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
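
// Illustrative sketch (assumed allocation-expansion context): zeroing the
// body of a freshly allocated object from the end of the header to the
// instance size, both offsets obeying the alignment rules stated above:
//
//   mem = ClearArrayNode::clear_memory(ctl, mem, rawptr,
//                                      header_size,   // aligned mod BytesPerInt
//                                      instance_size, // aligned mod BytesPerLong
//                                      &gvn);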

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
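
// Illustrative sketch (assumed caller context): barriers are created through
// the MemBarNode::make factory above; the caller then wires the control and
// memory inputs and transforms the node:
//
//   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire, Compile::AliasIdxBot);
//   mb->init_req(TypeFunc::Control, ctl);
//   mb->init_req(TypeFunc::Memory,  mem);
//   Node* membar = gvn.transform(mb);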

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for the intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility.
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
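
// Illustrative sketch (an assumption; the real wiring lives in the parser):
// the barrier sequence typically generated around a volatile store, using
// the flavors declared above:
//
//   insert_mem_bar(Op_MemBarRelease);    // no earlier access may sink below
//   ... emit the volatile StoreNode itself ...
//   insert_mem_bar(Op_MemBarVolatile);   // separate it from later volatile loads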

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed first by
  // initialization of the new memory to zero, then by any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return the offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find the captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
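
// Illustrative sketch (an assumption, simplified from how the optimizer
// uses the capture API; 'init', 'st', 'phase' and 'can_reshape' are
// stand-ins): folding a downstream store into the initialization:
//
//   intptr_t off = init->can_capture_store(st, phase, can_reshape);
//   if (off != 0) {
//     Node* captured = init->capture_store(st, off, phase, can_reshape);
//     // 'captured' (if not NULL) now writes init's raw memory directly
//   }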

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const;          // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const;  // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
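
// Illustrative sketch (an assumption; 'gvn', 'all_mem', 'alias_idx' and
// 'new_slice' are hypothetical stand-ins): updating a single alias slice
// of a memory state through a MergeMem:
//
//   MergeMemNode* mm = MergeMemNode::make(all_mem);
//   Node* old_slice = mm->memory_at(alias_idx);  // read the current slice
//   ... build 'new_slice' from 'old_slice' ...
//   mm->set_memory_at(alias_idx, new_slice);     // install the new slice
//   all_mem = gvn.transform(mm);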

class MergeMemStream : public StackObj {
private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};
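
// Illustrative sketch (an assumption, following the "expected usages"
// comment above; 'dst' and 'src' are hypothetical MergeMemNode*): copying
// every non-empty slice of one merge into another with the parallel
// iterator:
//
//   for (MergeMemStream mms(dst, src); mms.next_non_empty2(); ) {
//     if (!mms.is_empty2()) {
//       mms.set_memory(mms.memory2());  // overwrite dst's slice with src's
//     }
//   }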

// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
  CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return (idx == 2); }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPreSyncNode : public Node {
public:
  CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPostSyncNode : public Node {
public:
  CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};
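
// Illustrative sketch (an assumption, simplified from the writeback
// intrinsic; 'gvn', 'ctl', 'mem' and 'line_addr' are stand-ins): a cache
// writeback is bracketed by the pre/post sync nodes above, chained through
// memory:
//
//   mem = gvn.transform(new CacheWBPreSyncNode(ctl, mem));     // order prior stores
//   mem = gvn.transform(new CacheWBNode(ctl, mem, line_addr)); // flush the line
//   mem = gvn.transform(new CacheWBPostSyncNode(ctl, mem));    // order later stores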

//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP