/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_MEMNODE_HPP
#define SHARE_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;   // Unaligned access from unsafe
  bool _mismatched_access;  // Mismatched access from unsafe: byte read in integer array for instance
  bool _unsafe_access;      // Access of unsafe origin.
protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,   // When is it safe to do this load?
         Memory,    // Chunk of memory is being loaded from
         Address,   // Actually address, derived from base
         ValueIn,   // Value to store
         OopStore   // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,   // Load has to acquire or be succeeded by MemBarAcquire.
                 release,   // Store has to release or be preceded by MemBarRelease.
                 seqcst,    // LoadStore has to have both acquire and release semantics.
                 unset      // The memory ordering is not set (used for testing)
  } MemOrd;
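
  // Illustrative sketch (not part of this interface): MemOrd is supplied by
  // callers of the load/store factories declared later in this file.  For
  // example, a Java volatile read could be built as an acquiring load while
  // a plain field read stays unordered (variable names here are hypothetical):
  //
  //   Node* v = LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT, T_INT,
  //                            MemNode::acquire);    // volatile int field
  //   Node* p = LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT, T_INT,
  //                            MemNode::unordered);  // plain int field
  //
  // On strongly ordered platforms the acquire may fold into a plain load; on
  // weakly ordered ones it is matched to a load-acquire or a trailing
  // MemBarAcquire.
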
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false), _unsafe_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; }
  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

#ifdef ASSERT
  void set_adr_type(const TypePtr* adr_type) { _adr_type = adr_type; }
#endif

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
  void set_unsafe_access() { _unsafe_access = true; }
  bool is_unsafe_access() const { return _unsafe_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The field _control_dependency below records
  // whether this node depends only on the dominating test.
  // Pinned and UnknownControl are similar, but differ in that Pinned
  // loads are not allowed to float across safepoints, whereas UnknownControl
  // loads are allowed to do that.  Therefore, Pinned is stricter.
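  //
  // A minimal sketch (hypothetical call site): an unsafe load whose address
  // was only proven in-bounds by a test that GVN might later remove should
  // not be allowed to float, so it is created pinned:
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT, T_INT,
  //                             MemNode::unordered, LoadNode::Pinned);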
  enum ControlDependency {
    Pinned,
    UnknownControl,
    DependsOnlyOnTest
  };

private:
  // LoadNode::hash() doesn't take the _control_dependency field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place.  If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  ControlDependency _control_dependency;

  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // loads that can be reordered from those requiring acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

  uint _barrier; // Bit field with barrier information

protected:
  virtual bool cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _barrier(0), _type(rt) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool unaligned = false, bool mismatched = false, bool unsafe = false);

  virtual uint hash() const;    // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values.
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; }

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

  Node* convert_to_unsigned_load(PhaseGVN& gvn);
  Node* convert_to_signed_load(PhaseGVN& gvn);

  void copy_barrier_info(const Node* src) { _barrier = src->as_Load()->_barrier; }
  uint barrier_data() { return _barrier; }
  void set_barrier_data(uint barrier_data) { _barrier |= barrier_data; }

  void pin() { _control_dependency = Pinned; }
  bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseGVN* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const {
    return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
  }
};
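
// Illustrative consequence of depends_only_on_test() (hypothetical scenario):
// a LoadINode of an instance field answers true, so once its guarding null
// check is proven redundant, GVN may drop the control edge and common the
// load with an identical one elsewhere.  A LoadPNode of TypeRawPtr::BOTTOM
// (e.g. the TLS "top" pointer used by allocation) answers false, so its
// control edge survives and the load cannot be hoisted past a Safepoint.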

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false, bool unsafe = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
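
// The JLS requires volatile long (and double) accesses to be atomic even on
// 32-bit platforms that would otherwise split them into two 32-bit halves.
// A minimal sketch of how a caller asks for that guarantee (variable names
// are hypothetical):
//
//   Node* v = LoadLNode::make_atomic(ctl, mem, adr, adr_type,
//                                    TypeLong::LONG, MemNode::acquire);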

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false, bool unsafe = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set.  If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};

// Retrieve the null-free property from an array klass.  This is
// treated a bit like a field that would be read from the klass
// structure at runtime, except that the implementation encodes the
// property as a bit in the klass header field of the array.  This
// implementation detail is hidden under this node so it doesn't make
// a difference for high-level optimizations.  At final graph reshaping
// time, this node is turned into the actual logical operations that
// extract the property from the klass pointer.  For this to work
// correctly, GetNullFreePropertyNode must take a LoadKlass/LoadNKlass
// input.  The Ideal transformation splits the GetNullFreePropertyNode
// through phis, and Value returns a constant if the node's input is a
// constant.  These two together should guarantee GetNullFreePropertyNode
// does indeed have a LoadKlass/LoadNKlass input at final graph reshaping
// time.
class GetNullFreePropertyNode : public Node {
public:
  GetNullFreePropertyNode(Node* klass) : Node(NULL, klass) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* bottom_type() const {
    if (in(1)->bottom_type()->isa_klassptr()) {
      return TypeLong::LONG;
    }
    return TypeInt::INT;
  }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
  // stores that can be reordered from those requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual bool cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }
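
  // Illustrative use (hypothetical call site): when storing a value that may
  // be a reference to a freshly allocated object, callers conservatively ask
  // for release semantics via the helper above, using the factory declared
  // below:
  //
  //   Node* st = StoreNode::make(gvn, ctl, mem, adr, at, val, T_OBJECT,
  //                              StoreNode::release_if_reference(T_OBJECT));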

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization.  So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // Have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;

  MemBarNode* trailing_membar() const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
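
// Mirror of the atomic long load above: a volatile long store must not be
// split into two 32-bit stores on 32-bit platforms.  A minimal sketch
// (hypothetical variable names):
//
//   Node* st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val,
//                                      MemNode::release);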

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual bool cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store.
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual bool cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           (_oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == NULL)  return NULL; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {}
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  bool _has_barrier;
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
  MemBarNode* trailing_membar() const;
  void set_has_barrier() { _has_barrier = true; }
  bool has_barrier() const { return _has_barrier; }
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};
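
// A minimal sketch of the load-locked/store-conditional pairing (node wiring
// only, hypothetical variable names; projection setup is simplified):
//
//   Node* ll = new LoadPLockedNode(ctl, mem, adr, MemNode::acquire);
//   Node* sc = new StorePConditionalNode(ctl, mem, adr, newval, ll);
//   // sc produces condition flags (ideal_reg() == Op_RegFlags); its updated
//   // memory state is consumed through an SCMemProjNode(sc).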

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

//------------------------------CompareAndSwapBNode---------------------------
class CompareAndSwapBNode : public CompareAndSwapNode {
public:
  CompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapSNode---------------------------
class CompareAndSwapSNode : public CompareAndSwapNode {
public:
  CompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};
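
// Illustrative construction of a strong CAS (hypothetical variable names;
// note the input order: 'val' is the new value, 'ex' the expected value):
//
//   Node* cas = new CompareAndSwapPNode(ctl, mem, adr, newval, expected,
//                                       MemNode::seqcst);
//
// The node evaluates to a success flag (a boolean-typed result); the weak
// variants below are allowed to fail spuriously.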

//------------------------------WeakCompareAndSwapBNode---------------------------
class WeakCompareAndSwapBNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapSNode---------------------------
class WeakCompareAndSwapSNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};
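
// The three conditional-update families differ in what they return:
// CompareAndSwap* yield a success flag, WeakCompareAndSwap* do the same but
// are allowed to fail spuriously, and CompareAndExchange* (below) yield the
// value found in memory, which callers compare against the expected value
// themselves.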

//------------------------------CompareAndExchangeBNode---------------------------
class CompareAndExchangeBNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeBNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::BYTE) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeSNode---------------------------
class CompareAndExchangeSNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeSNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::SHORT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddBNode---------------------------
class GetAndAddBNode : public LoadStoreNode {
public:
  GetAndAddBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddSNode---------------------------
class GetAndAddSNode : public LoadStoreNode {
public:
  GetAndAddSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetBNode---------------------------
class GetAndSetBNode : public LoadStoreNode {
public:
  GetAndSetBNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::BYTE, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetSNode---------------------------
class GetAndSetSNode : public LoadStoreNode {
public:
  GetAndSetSNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::SHORT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
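
// Both GetAndAdd* and GetAndSet* are unconditional read-modify-write nodes:
// they evaluate to the value previously in memory while updating it.  A
// minimal sketch (hypothetical variable names):
//
//   Node* old  = new GetAndAddINode(ctl, mem, adr, delta, at);     // fetch-and-add
//   Node* prev = new GetAndSetPNode(ctl, mem, adr, newval, at, t); // swap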

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
  bool _word_copy_only;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, Node* val, bool is_large)
    : Node(ctrl, arymem, word_cnt, base, val), _is_large(is_large),
      _word_copy_only(val->bottom_type()->isa_long() && (!val->bottom_type()->is_long()->is_con() || val->bottom_type()->is_long()->get_con() != 0)) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }
  bool word_copy_only() const { return _word_copy_only; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* val,
                            Node* raw_val,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* raw_val,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
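
// Illustrative use of clear_memory (hypothetical variable names): zeroing the
// body of a new array from the end of its header to its computed size, and
// threading the returned memory state into subsequent stores:
//
//   mem = ClearArrayNode::clear_memory(ctl, mem, dest, val, raw_val,
//                                      header_size,   // aligned mod BytesPerInt
//                                      size_in_bytes, // aligned mod BytesPerLong
//                                      &gvn);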

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

  // How is this membar related to a nearby memory access?
  enum {
    Standalone,
    TrailingLoad,
    TrailingStore,
    LeadingStore,
    TrailingLoadStore,
    LeadingLoadStore
  } _kind;

#ifdef ASSERT
  uint _pair_idx;
#endif

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);

  MemBarNode* trailing_membar() const;
  MemBarNode* leading_membar() const;

  void set_trailing_load() { _kind = TrailingLoad; }
  bool trailing_load() const { return _kind == TrailingLoad; }
  bool trailing_store() const { return _kind == TrailingStore; }
  bool leading_store() const { return _kind == LeadingStore; }
  bool trailing_load_store() const { return _kind == TrailingLoadStore; }
  bool leading_load_store() const { return _kind == LeadingLoadStore; }
  bool trailing() const { return _kind == TrailingLoad || _kind == TrailingStore || _kind == TrailingLoadStore; }
  bool leading() const { return _kind == LeadingStore || _kind == LeadingLoadStore; }
  bool standalone() const { return _kind == Standalone; }

  static void set_store_pair(MemBarNode* leading, MemBarNode* trailing);
  static void set_load_store_pair(MemBarNode* leading, MemBarNode* trailing);

  void remove(PhaseIterGVN *igvn);
};
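
// A minimal sketch of the membar sequence built around a Java volatile store
// (simplified; the exact shape varies by platform and barrier set):
//
//   MemBarNode* lead  = MemBarNode::make(C, Op_MemBarRelease);  // leading
//   ...                                  // the StoreX with MemNode::release
//   MemBarNode* trail = MemBarNode::make(C, Op_MemBarVolatile); // trailing
//   MemBarNode::set_store_pair(lead, trail);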

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

class OnSpinWaitNode: public MemBarNode {
public:
  OnSpinWaitNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
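
// Note: OnSpinWaitNode backs the Thread.onSpinWait() intrinsic; on
// platforms with a spin-wait hint it is typically matched to one (for
// example, the pause instruction on x86).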

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);
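
  // Illustrative capture sequence (sketch; 'init', 'st', 'phase' and
  // 'can_reshape' are hypothetical locals):
  //
  //   intptr_t off = init->can_capture_store(st, phase, can_reshape);
  //   if (off != 0) {
  //     // The captured copy writes this initialization's raw memory;
  //     // it is NULL if capturing failed after all.
  //     Node* captured = init->capture_store(st, off, phase, can_reshape);
  //   }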

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;      // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
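
// Illustrative use of the sparse accessors (sketch; 'mem', 'alias_idx'
// and 'new_slice' are hypothetical locals):
//
//   MergeMemNode* mms = MergeMemNode::make(mem);
//   Node* slice = mms->memory_at(alias_idx);  // stored slice, else base memory
//   mms->set_memory_at(alias_idx, new_slice); // overwrite just that slice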

class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
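
  // Illustrative expansion of the two-merge idiom above (sketch;
  // 'combine' is a hypothetical helper):
  //
  //   for (MergeMemStream mms(mm1, mm2); mms.next_non_empty2(); ) {
  //     // next_non_empty2 can stop where only mm2 is non-empty, so
  //     // re-check is_empty() before touching mms.memory().
  //     Node* m2 = mms.memory2();
  //     if (!mms.is_empty())  mms.set_memory(combine(mms.memory(), m2));
  //   }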
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};

// cachewb node for guaranteeing writeback of the cache line at a
// given address to (non-volatile) RAM
class CacheWBNode : public Node {
public:
  CacheWBNode(Node *ctrl, Node *mem, Node *addr) : Node(ctrl, mem, addr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return (idx == 2); }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

// cachewb pre sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPreSyncNode : public Node {
public:
  CacheWBPreSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};
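
// Illustrative emission order for a cache-line writeback sequence
// (sketch): one pre sync, a CacheWB per affected line, one post sync:
//
//   CacheWBPreSync; CacheWB(line0); CacheWB(line1); ...; CacheWBPostSync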

// cachewb post sync node for ensuring that writebacks are serialised
// relative to preceding or following stores
class CacheWBPostSyncNode : public Node {
public:
  CacheWBPostSyncNode(Node *ctrl, Node *mem) : Node(ctrl, mem) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return false; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
};

//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_OPTO_MEMNODE_HPP