/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
private:
  bool _unaligned_access;   // Unaligned access from unsafe
  bool _mismatched_access;  // Mismatched access from unsafe: byte read in integer array for instance
protected:
#ifdef ASSERT
  const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  enum { Control,   // When is it safe to do this load?
         Memory,    // Chunk of memory is being loaded from
         Address,   // Actually address, derived from base
         ValueIn,   // Value to store
         OopStore   // Preceding oop store, only in StoreCM
  };
  typedef enum { unordered = 0,
                 acquire,   // Load has to acquire or be succeeded by MemBarAcquire.
                 release,   // Store has to release or be preceded by MemBarRelease.
                 seqcst,    // LoadStore has to have both acquire and release semantics.
                 unset      // The memory ordering is not set (used for testing)
  } MemOrd;
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test.  The boolean field _depends_only_on_test
  // below records whether that node depends only on the dominating
  // test.
  // Methods used to build LoadNodes pass an argument of type enum
  // ControlDependency instead of a boolean because those methods
  // typically have multiple boolean parameters with default values:
  // passing the wrong boolean to one of these parameters by mistake
  // goes easily unnoticed.  Using an enum, the compiler can check that
  // the type of a value and the type of the parameter match.
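  //
  // A minimal illustration (hypothetical call site; gvn, ctl, mem, adr and
  // adr_type are assumed to be in scope) of why the enum beats a bool:
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeInt::INT, T_INT, MemNode::unordered,
  //                             LoadNode::Pinned);
  //
  // A bare 'true' in that position could silently bind to one of the other
  // defaulted boolean parameters (unaligned, mismatched) instead.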
  enum ControlDependency {
    Pinned,
    DependsOnlyOnTest
  };
private:
  // LoadNode::hash() doesn't take the _depends_only_on_test field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place.  If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  bool _depends_only_on_test;

  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // loads that can be reordered, and such requiring acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

protected:
  virtual uint cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?

  virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }
  inline bool is_unsigned() const {
    int lop = Opcode();
    return (lop == Op_LoadUB) || (lop == Op_LoadUS);
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                    bool unaligned = false, bool mismatched = false);

  virtual uint hash() const;    // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node* Identity(PhaseGVN* phase);

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type* klass_value_common(PhaseGVN* phase) const;
  Node* klass_identity_common(PhaseGVN* phase);

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;

  Node* can_see_arraycopy_value(Node* st, PhaseTransform* phase) const;

  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
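  //
  // Illustrative consequence (a sketch, not a new rule): for a raw-pointer
  // load such as the TLS top pointer, adr_type() is TypeRawPtr::BOTTOM, so
  // the override below answers false and the load stays anchored to its
  // control edge instead of being hoisted above a Safepoint.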
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
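  // Note (sketch, not from the original source): a LoadINode is normally
  // obtained through the polymorphic factory rather than constructed
  // directly, e.g.
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeInt::INT, T_INT, MemNode::unordered);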
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const
                    Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
                                bool unaligned = false, bool mismatched = false);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
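  // For example (sketch; gvn, mem and adr are assumed to be in scope), a
  // klass load created through the factory below with a NULL control,
  //
  //   Node* k = LoadKlassNode::make(gvn, NULL, mem, adr, TypeInstPtr::KLASS);
  //
  // carries no control edge at all, while one created with an explicit
  // control keeps that edge through LoadNode::Ideal().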
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // stores that can be reordered, and such requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
#ifdef AARCH64
    // AArch64 doesn't need a release store here because object
    // initialization contains the necessary barriers.
    return unordered;
#else
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
#endif
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node* Identity(PhaseGVN* phase);

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
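  // Sketch (illustrative, not from the original source): on platforms where
  // a 64-bit store is not naturally atomic, callers request an indivisible
  // store through the factory declared below, which sets this flag:
  //
  //   StoreLNode* st = StoreLNode::make_atomic(ctl, mem, adr, adr_type,
  //                                            val, MemNode::release);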

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type()
      const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
 private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const {
    Node* ctrl = in(0);
    if (ctrl == NULL)  return NULL; // node is dead
    return ctrl->in(MemNode::Memory)->adr_type();
  }
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
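
  // Illustrative shape (a sketch; ctl, mem, adr, oldval, newval and mo are
  // assumed to be in scope): the value result of a LoadStoreNode is consumed
  // directly, while its memory effect is consumed through an SCMemProjNode:
  //
  //   Node* cas  = new CompareAndSwapPNode(ctl, mem, adr, newval, oldval, mo);
  //   Node* proj = new SCMemProjNode(cas);  // the updated memory state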
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

class CompareAndSwapNode : public LoadStoreConditionalNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  CompareAndSwapNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : LoadStoreConditionalNode(c, mem, adr, val, ex), _mem_ord(mem_ord) {}
  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

class CompareAndExchangeNode : public LoadStoreNode {
private:
  const MemNode::MemOrd _mem_ord;
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  CompareAndExchangeNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord, const TypePtr* at, const Type* t) :
    LoadStoreNode(c, mem, adr, val, at, t, 5), _mem_ord(mem_ord) {
    init_req(ExpectedIn, ex );
  }

  MemNode::MemOrd order() const {
    return _mem_ord;
  }
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public CompareAndSwapNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public CompareAndSwapNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public CompareAndSwapNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public CompareAndSwapNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------WeakCompareAndSwapLNode---------------------------
class WeakCompareAndSwapLNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------WeakCompareAndSwapINode---------------------------
class WeakCompareAndSwapINode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};


//------------------------------WeakCompareAndSwapPNode---------------------------
class WeakCompareAndSwapPNode : public
    CompareAndSwapNode {
public:
  WeakCompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------WeakCompareAndSwapNNode---------------------------
class WeakCompareAndSwapNNode : public CompareAndSwapNode {
public:
  WeakCompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapNode(c, mem, adr, val, ex, mem_ord) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeLNode---------------------------
class CompareAndExchangeLNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeLong::LONG) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangeINode---------------------------
class CompareAndExchangeINode : public CompareAndExchangeNode {
public:
  CompareAndExchangeINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, TypeInt::INT) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndExchangePNode---------------------------
class CompareAndExchangePNode : public CompareAndExchangeNode {
public:
  CompareAndExchangePNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndExchangeNNode---------------------------
class CompareAndExchangeNNode : public CompareAndExchangeNode {
public:
  CompareAndExchangeNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangeNode(c, mem, adr, val, ex, mem_ord, at, t) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};


//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int
      Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
private:
  bool _is_large;
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base, bool is_large)
    : Node(ctrl,arymem,word_cnt,base), _is_large(is_large) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;
  bool is_large() const { return _is_large; }

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type* Value(PhaseGVN* phase) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Node* base_memory);

  virtual int Opcode() const;
  virtual Node* Identity(PhaseGVN* phase);
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
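// A hedged usage sketch (illustrative only): a transform that needs to edit
// a single alias slice of an arbitrary memory state typically widens it to a
// MergeMem first ('all_mem', 'alias_idx' and 'new_slice' are assumptions):
//
//   MergeMemNode* mm = MergeMemNode::make(all_mem);  // clone or wrap all_mem
//   mm->set_memory_at(alias_idx, new_slice);         // overwrite one slice
//   Node* slice = mm->memory_at(alias_idx);          // reads back the slice,
//                                                    // or base_memory() if unset
//
// Slices left untouched continue to be represented sparsely by base_memory().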
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  //  for (MergeMemStream mms(mem->is_MergeMem()); mms.next_non_empty(); ) { ... }
  //  for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};
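// A hedged usage sketch (illustrative only), filling out the "expected
// usages" comment above: walk every non-empty slice of a MergeMem and
// install a rewritten slice ('rewrite_slice' is an assumed helper):
//
//   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
//     Node* old_slice = mms.memory();          // current non-empty slice
//     Node* new_slice = rewrite_slice(old_slice, mms.alias_idx());
//     if (new_slice != old_slice)  mms.set_memory(new_slice);  // write back
//   }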
//------------------------------Prefetch---------------------------------------

// Allocation prefetch which may fault; TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP