/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception.
// Common base class for all memory-accessing ideal nodes; fixes the meaning
// of the first few input edges (see the enum below).
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  // Symbolic indices for the fixed input edges of every MemNode.
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
  // Required memory-ordering semantics of an access.
  typedef enum { unordered = 0,
                 acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
                 release        // Store has to release or be preceded by MemBarRelease.
  } MemOrd;
protected:
  // The constructors only record the address type in ASSERT builds
  // (and immediately cross-check it via adr_type()).
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID mean "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // loads that can be reordered, and such requiring acquire semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;

protected:
  virtual uint cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  // NOTE(review): the mem-initializer order (_type, _mo) differs from the
  // declaration order (_mo, _type).  Members are always initialized in
  // declaration order, so this is behaviorally harmless here (both are
  // initialized straight from parameters), but it triggers -Wreorder.
  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo)
    : MemNode(c,mem,adr,at), _type(rt), _mo(mo) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt, MemOrd mo);

  virtual uint hash()   const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }

};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
    : LoadNode(c, mem, adr, at, ti, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  // _require_atomic_access participates in hash/cmp so that GVN never
  // commons an atomic long load with a non-atomic one.
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode-------------------------------------- 356 // Load a float (64 bits) from memory 357 class LoadFNode : public LoadNode { 358 public: 359 LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo) 360 : LoadNode(c, mem, adr, at, t, mo) {} 361 virtual int Opcode() const; 362 virtual uint ideal_reg() const { return Op_RegF; } 363 virtual int store_Opcode() const { return Op_StoreF; } 364 virtual BasicType memory_type() const { return T_FLOAT; } 365 }; 366 367 //------------------------------LoadDNode-------------------------------------- 368 // Load a double (64 bits) from memory 369 class LoadDNode : public LoadNode { 370 public: 371 LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo) 372 : LoadNode(c, mem, adr, at, t, mo) {} 373 virtual int Opcode() const; 374 virtual uint ideal_reg() const { return Op_RegD; } 375 virtual int store_Opcode() const { return Op_StoreD; } 376 virtual BasicType memory_type() const { return T_DOUBLE; } 377 }; 378 379 //------------------------------LoadD_unalignedNode---------------------------- 380 // Load a double from unaligned memory 381 class LoadD_unalignedNode : public LoadDNode { 382 public: 383 LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo) 384 : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo) {} 385 virtual int Opcode() const; 386 }; 387 388 //------------------------------LoadPNode-------------------------------------- 389 // Load a pointer from memory (either object or array) 390 class LoadPNode : public LoadNode { 391 public: 392 LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo) 393 : LoadNode(c, mem, adr, at, t, mo) {} 394 virtual int Opcode() const; 395 virtual uint ideal_reg() const { return Op_RegP; } 396 virtual int store_Opcode() const { return Op_StoreP; } 397 virtual BasicType memory_type() const { return T_ADDRESS; } 398 }; 399 400 401 
//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo)
    : LoadNode(c, mem, adr, at, t, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  // Klass words are immutable once set, so a klass load may be hoisted freely.
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // stores that can be reordered, and such requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  // _require_atomic_access participates in hash/cmp so that GVN never
  // commons an atomic long store with a non-atomic one.
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
private:
  // _oop_alias_idx participates in hash/cmp so GVN keeps card marks for
  // different oop alias classes distinct.
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;   // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    // NOTE: && binds tighter than ||, so AliasIdxBot is accepted only when
    // AliasLevel() == 0; the lack of parentheses here is intentional.
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};  // distinguished projection constant
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool      is_CFG() const  { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
739 class StoreIConditionalNode : public LoadStoreConditionalNode { 740 public: 741 StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { } 742 virtual int Opcode() const; 743 // Produces flags 744 virtual uint ideal_reg() const { return Op_RegFlags; } 745 }; 746 747 //------------------------------StoreLConditionalNode--------------------------- 748 // Conditionally store long to memory, if no change since prior 749 // load-locked. Sets flags for success or failure of the store. 750 class StoreLConditionalNode : public LoadStoreConditionalNode { 751 public: 752 StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { } 753 virtual int Opcode() const; 754 // Produces flags 755 virtual uint ideal_reg() const { return Op_RegFlags; } 756 }; 757 758 759 //------------------------------CompareAndSwapLNode--------------------------- 760 class CompareAndSwapLNode : public LoadStoreConditionalNode { 761 public: 762 CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } 763 virtual int Opcode() const; 764 }; 765 766 767 //------------------------------CompareAndSwapINode--------------------------- 768 class CompareAndSwapINode : public LoadStoreConditionalNode { 769 public: 770 CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } 771 virtual int Opcode() const; 772 }; 773 774 775 //------------------------------CompareAndSwapPNode--------------------------- 776 class CompareAndSwapPNode : public LoadStoreConditionalNode { 777 public: 778 CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } 779 virtual int Opcode() const; 780 }; 781 782 //------------------------------CompareAndSwapNNode--------------------------- 783 
// Compare-and-swap of a narrow oop value; 'ex' is the expected old value.
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
// Atomic get-and-add of an int; the result is the prior value (TypeInt::INT).
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
// Atomic get-and-add of a long; the result is the prior value (TypeLong::LONG).
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};


//------------------------------GetAndSetINode---------------------------
// Atomic exchange of an int; the result is the prior value (TypeInt::INT).
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
// Atomic exchange of a long; the result is the prior value (TypeLong::LONG).
// (The original header comment said "GetAndSetINode" -- copy-paste slip.)
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
// Atomic exchange of a pointer; the result type 't' is supplied by the caller.
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
// Atomic exchange of a narrow oop; the result type 't' is supplied by the caller.
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic sun.misc.Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic sun.misc.Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "StoreStore" - keeps earlier stores from being reordered with later stores
// (see the MemBar overview comment above).
class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  // Completion state of this initialization (stored in _is_complete).
  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

// Iterator over the memory slices of one MergeMem, or of two MergeMems in
// parallel (see the "expected usages" comment below).
class MergeMemStream : public StackObj {
 private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;      // current slice index (alias index)
  int                 _cnt;      // number of inputs of _mm
  Node*               _mem;      // current slice of _mm
  Node*               _mem2;     // current slice of _mm2
  int                 _cnt2;     // number of inputs of _mm2

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  // Recompute what _mem should be, for cross-checking in assert_synch etc.
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  // Same as check_memory, but for the second merge _mm2.
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

 public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note: This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Only the address input (edge 2) is matched.
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Only the address input (edge 2) is matched.
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Allocation prefetch which may fault, TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Only the address input (edge 2) is matched.
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP