src/share/vm/opto/memnode.hpp

Print this page
rev 5661 : 8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering.


 117   }
 118 
 119   // Search through memory states which precede this node (load or store).
 120   // Look for an exact match for the address, with no intervening
 121   // aliased stores.
 122   Node* find_previous_store(PhaseTransform* phase);
 123 
 124   // Can this node (load or store) accurately see a stored value in
 125   // the given memory state?  (The state may or may not be in(Memory).)
 126   Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
 127 
 128 #ifndef PRODUCT
 129   static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
 130   virtual void dump_spec(outputStream *st) const;
 131 #endif
 132 };
 133 
 134 //------------------------------LoadNode---------------------------------------
 135 // Load value; requires Memory and Address
 136 class LoadNode : public MemNode {










 137 protected:
 138   virtual uint cmp( const Node &n ) const;
 139   virtual uint size_of() const; // Size is bigger
 140   const Type* const _type;      // What kind of value is loaded?
 141 public:
 142 
 143   LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
 144     : MemNode(c,mem,adr,at), _type(rt) {
 145     init_class_id(Class_Load);
 146   }





 147 
 148   // Polymorphic factory method:
 149   static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
 150                      const TypePtr* at, const Type *rt, BasicType bt );
 151 
 152   virtual uint hash()   const;  // Check the type
 153 
 154   // Handle algebraic identities here.  If we have an identity, return the Node
 155   // we are equivalent to.  We look for Load of a Store.
 156   virtual Node *Identity( PhaseTransform *phase );
 157 
 158   // If the load is from Field memory and the pointer is non-null, we can
 159   // zero out the control input.
 160   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 161 
 162   // Split instance field load through Phi.
 163   Node* split_through_phi(PhaseGVN *phase);
 164 
 165   // Recover original value from boxed values
 166   Node *eliminate_autobox(PhaseGVN *phase);
 167 
 168   // Compute a new Type for this node.  Basically we just do the pre-check,
 169   // then call the virtual add() to set the type.
 170   virtual const Type *Value( PhaseTransform *phase ) const;


 193 
 194   // Check if the load's memory input is a Phi node with the same control.
 195   bool is_instance_field_load_with_local_phi(Node* ctrl);
 196 
 197 #ifndef PRODUCT
 198   virtual void dump_spec(outputStream *st) const;
 199 #endif
 200 #ifdef ASSERT
 201   // Helper function to allow a raw load without control edge for some cases
 202   static bool is_immutable_value(Node* adr);
 203 #endif
 204 protected:
 205   const Type* load_array_final_field(const TypeKlassPtr *tkls,
 206                                      ciKlass* klass) const;
 207 };
 208 
 209 //------------------------------LoadBNode--------------------------------------
 210 // Load a byte (8bits signed) from memory
 211 class LoadBNode : public LoadNode {
 212 public:
 213   LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
 214     : LoadNode(c,mem,adr,at,ti) {}
 215   virtual int Opcode() const;
 216   virtual uint ideal_reg() const { return Op_RegI; }
 217   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 218   virtual const Type *Value(PhaseTransform *phase) const;
 219   virtual int store_Opcode() const { return Op_StoreB; }
 220   virtual BasicType memory_type() const { return T_BYTE; }
 221 };
 222 
 223 //------------------------------LoadUBNode-------------------------------------
 224   // Load an unsigned byte (8bits unsigned) from memory
 225 class LoadUBNode : public LoadNode {
 226 public:
 227   LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
 228     : LoadNode(c, mem, adr, at, ti) {}
 229   virtual int Opcode() const;
 230   virtual uint ideal_reg() const { return Op_RegI; }
 231   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
 232   virtual const Type *Value(PhaseTransform *phase) const;
 233   virtual int store_Opcode() const { return Op_StoreB; }
 234   virtual BasicType memory_type() const { return T_BYTE; }
 235 };
 236 
 237 //------------------------------LoadUSNode-------------------------------------
 238 // Load an unsigned short/char (16bits unsigned) from memory
 239 class LoadUSNode : public LoadNode {
 240 public:
 241   LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
 242     : LoadNode(c,mem,adr,at,ti) {}
 243   virtual int Opcode() const;
 244   virtual uint ideal_reg() const { return Op_RegI; }
 245   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 246   virtual const Type *Value(PhaseTransform *phase) const;
 247   virtual int store_Opcode() const { return Op_StoreC; }
 248   virtual BasicType memory_type() const { return T_CHAR; }
 249 };
 250 
 251 //------------------------------LoadSNode--------------------------------------
 252 // Load a short (16bits signed) from memory
 253 class LoadSNode : public LoadNode {
 254 public:
 255   LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
 256     : LoadNode(c,mem,adr,at,ti) {}
 257   virtual int Opcode() const;
 258   virtual uint ideal_reg() const { return Op_RegI; }
 259   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 260   virtual const Type *Value(PhaseTransform *phase) const;
 261   virtual int store_Opcode() const { return Op_StoreC; }
 262   virtual BasicType memory_type() const { return T_SHORT; }
 263 };
 264 
 265 //------------------------------LoadINode--------------------------------------
 266 // Load an integer from memory
 267 class LoadINode : public LoadNode {
 268 public:
 269   LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
 270     : LoadNode(c,mem,adr,at,ti) {}
 271   virtual int Opcode() const;
 272   virtual uint ideal_reg() const { return Op_RegI; }
 273   virtual int store_Opcode() const { return Op_StoreI; }
 274   virtual BasicType memory_type() const { return T_INT; }
 275 };
 276 
 277 //------------------------------LoadRangeNode----------------------------------
 278 // Load an array length from the array
 279 class LoadRangeNode : public LoadINode {
 280 public:
 281   LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
 282     : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
 283   virtual int Opcode() const;
 284   virtual const Type *Value( PhaseTransform *phase ) const;
 285   virtual Node *Identity( PhaseTransform *phase );
 286   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 287 };
 288 
 289 //------------------------------LoadLNode--------------------------------------
 290 // Load a long from memory
 291 class LoadLNode : public LoadNode {
 292   virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
 293   virtual uint cmp( const Node &n ) const {
 294     return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
 295       && LoadNode::cmp(n);
 296   }
 297   virtual uint size_of() const { return sizeof(*this); }
 298   const bool _require_atomic_access;  // is piecewise load forbidden?
 299 
 300 public:
 301   LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
 302              const TypeLong *tl = TypeLong::LONG,
 303              bool require_atomic_access = false )
 304     : LoadNode(c,mem,adr,at,tl)
 305     , _require_atomic_access(require_atomic_access)
 306   {}
 307   virtual int Opcode() const;
 308   virtual uint ideal_reg() const { return Op_RegL; }
 309   virtual int store_Opcode() const { return Op_StoreL; }
 310   virtual BasicType memory_type() const { return T_LONG; }
 311   bool require_atomic_access() { return _require_atomic_access; }
 312   static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);

 313 #ifndef PRODUCT
 314   virtual void dump_spec(outputStream *st) const {
 315     LoadNode::dump_spec(st);
 316     if (_require_atomic_access)  st->print(" Atomic!");
 317   }
 318 #endif
 319 };
 320 
 321 //------------------------------LoadL_unalignedNode----------------------------
 322 // Load a long from unaligned memory
 323 class LoadL_unalignedNode : public LoadLNode {
 324 public:
 325   LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
 326     : LoadLNode(c,mem,adr,at) {}
 327   virtual int Opcode() const;
 328 };
 329 
 330 //------------------------------LoadFNode--------------------------------------
 331   // Load a float (32 bits) from memory
 332 class LoadFNode : public LoadNode {
 333 public:
 334   LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
 335     : LoadNode(c,mem,adr,at,t) {}
 336   virtual int Opcode() const;
 337   virtual uint ideal_reg() const { return Op_RegF; }
 338   virtual int store_Opcode() const { return Op_StoreF; }
 339   virtual BasicType memory_type() const { return T_FLOAT; }
 340 };
 341 
 342 //------------------------------LoadDNode--------------------------------------
 343 // Load a double (64 bits) from memory
 344 class LoadDNode : public LoadNode {
 345 public:
 346   LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
 347     : LoadNode(c,mem,adr,at,t) {}
 348   virtual int Opcode() const;
 349   virtual uint ideal_reg() const { return Op_RegD; }
 350   virtual int store_Opcode() const { return Op_StoreD; }
 351   virtual BasicType memory_type() const { return T_DOUBLE; }
 352 };
 353 
 354 //------------------------------LoadD_unalignedNode----------------------------
 355 // Load a double from unaligned memory
 356 class LoadD_unalignedNode : public LoadDNode {
 357 public:
 358   LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
 359     : LoadDNode(c,mem,adr,at) {}
 360   virtual int Opcode() const;
 361 };
 362 
 363 //------------------------------LoadPNode--------------------------------------
 364 // Load a pointer from memory (either object or array)
 365 class LoadPNode : public LoadNode {
 366 public:
 367   LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
 368     : LoadNode(c,mem,adr,at,t) {}
 369   virtual int Opcode() const;
 370   virtual uint ideal_reg() const { return Op_RegP; }
 371   virtual int store_Opcode() const { return Op_StoreP; }
 372   virtual BasicType memory_type() const { return T_ADDRESS; }
 373   // depends_only_on_test is almost always true, and needs to be almost always
 374   // true to enable key hoisting & commoning optimizations.  However, for the
 375   // special case of RawPtr loads from TLS top & end, the control edge carries
 376   // the dependence preventing hoisting past a Safepoint instead of the memory
 377   // edge.  (An unfortunate consequence of having Safepoints not set Raw
 378   // Memory; itself an unfortunate consequence of having Nodes which produce
 379   // results (new raw memory state) inside of loops preventing all manner of
 380   // other optimizations).  Basically, it's ugly but so is the alternative.
 381   // See comment in macro.cpp, around line 125 expand_allocate_common().
 382   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 383 };
 384 
 385 
 386 //------------------------------LoadNNode--------------------------------------
 387 // Load a narrow oop from memory (either object or array)
 388 class LoadNNode : public LoadNode {
 389 public:
 390   LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
 391     : LoadNode(c,mem,adr,at,t) {}
 392   virtual int Opcode() const;
 393   virtual uint ideal_reg() const { return Op_RegN; }
 394   virtual int store_Opcode() const { return Op_StoreN; }
 395   virtual BasicType memory_type() const { return T_NARROWOOP; }
 396   // depends_only_on_test is almost always true, and needs to be almost always
 397   // true to enable key hoisting & commoning optimizations.  However, for the
 398   // special case of RawPtr loads from TLS top & end, the control edge carries
 399   // the dependence preventing hoisting past a Safepoint instead of the memory
 400   // edge.  (An unfortunate consequence of having Safepoints not set Raw
 401   // Memory; itself an unfortunate consequence of having Nodes which produce
 402   // results (new raw memory state) inside of loops preventing all manner of
 403   // other optimizations).  Basically, it's ugly but so is the alternative.
 404   // See comment in macro.cpp, around line 125 expand_allocate_common().
 405   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 406 };
 407 
 408 //------------------------------LoadKlassNode----------------------------------
 409 // Load a Klass from an object
 410 class LoadKlassNode : public LoadPNode {
 411 public:
 412   LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
 413     : LoadPNode(c,mem,adr,at,tk) {}
 414   virtual int Opcode() const;
 415   virtual const Type *Value( PhaseTransform *phase ) const;
 416   virtual Node *Identity( PhaseTransform *phase );
 417   virtual bool depends_only_on_test() const { return true; }
 418 
 419   // Polymorphic factory method:
 420   static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
 421                      const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
 422 };
 423 
 424 //------------------------------LoadNKlassNode---------------------------------
 425 // Load a narrow Klass from an object.
 426 class LoadNKlassNode : public LoadNNode {
 427 public:
 428   LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk )
 429     : LoadNNode(c,mem,adr,at,tk) {}
 430   virtual int Opcode() const;
 431   virtual uint ideal_reg() const { return Op_RegN; }
 432   virtual int store_Opcode() const { return Op_StoreNKlass; }
 433   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 434 
 435   virtual const Type *Value( PhaseTransform *phase ) const;
 436   virtual Node *Identity( PhaseTransform *phase );
 437   virtual bool depends_only_on_test() const { return true; }
 438 };
 439 
 440 
 441 //------------------------------StoreNode--------------------------------------
 442   // Store value; requires Memory, Address and Value
 443 class StoreNode : public MemNode {










 444 protected:
 445   virtual uint cmp( const Node &n ) const;
 446   virtual bool depends_only_on_test() const { return false; }
 447 
 448   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 449   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
 450 
 451 public:
 452   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
 453     : MemNode(c,mem,adr,at,val) {





 454     init_class_id(Class_Store);
 455   }
 456   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
 457     : MemNode(c,mem,adr,at,val,oop_store) {
 458     init_class_id(Class_Store);
 459   }
 460 
 461   // Polymorphic factory method:
 462   static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
 463                           const TypePtr* at, Node *val, BasicType bt );





















 464 
 465   virtual uint hash() const;    // Check the type
 466 
 467   // If the store is to Field memory and the pointer is non-null, we can
 468   // zero out the control input.
 469   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 470 
 471   // Compute a new Type for this node.  Basically we just do the pre-check,
 472   // then call the virtual add() to set the type.
 473   virtual const Type *Value( PhaseTransform *phase ) const;
 474 
 475   // Check for identity function on memory (Load then Store at same address)
 476   virtual Node *Identity( PhaseTransform *phase );
 477 
 478   // Do not match memory edge
 479   virtual uint match_edge(uint idx) const;
 480 
 481   virtual const Type *bottom_type() const;  // returns Type::MEMORY
 482 
 483   // Map a store opcode to its corresponding own opcode, trivially.
 484   virtual int store_Opcode() const { return Opcode(); }
 485 
 486   // have all possible loads of the value stored been optimized away?
 487   bool value_never_loaded(PhaseTransform *phase) const;
 488 };
 489 
 490 //------------------------------StoreBNode-------------------------------------
 491 // Store byte to memory
 492 class StoreBNode : public StoreNode {
 493 public:
 494   StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}

 495   virtual int Opcode() const;
 496   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 497   virtual BasicType memory_type() const { return T_BYTE; }
 498 };
 499 
 500 //------------------------------StoreCNode-------------------------------------
 501 // Store char/short to memory
 502 class StoreCNode : public StoreNode {
 503 public:
 504   StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}

 505   virtual int Opcode() const;
 506   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 507   virtual BasicType memory_type() const { return T_CHAR; }
 508 };
 509 
 510 //------------------------------StoreINode-------------------------------------
 511 // Store int to memory
 512 class StoreINode : public StoreNode {
 513 public:
 514   StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}

 515   virtual int Opcode() const;
 516   virtual BasicType memory_type() const { return T_INT; }
 517 };
 518 
 519 //------------------------------StoreLNode-------------------------------------
 520 // Store long to memory
 521 class StoreLNode : public StoreNode {
 522   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
 523   virtual uint cmp( const Node &n ) const {
 524     return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
 525       && StoreNode::cmp(n);
 526   }
 527   virtual uint size_of() const { return sizeof(*this); }
 528   const bool _require_atomic_access;  // is piecewise store forbidden?
 529 
 530 public:
 531   StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
 532               bool require_atomic_access = false )
 533     : StoreNode(c,mem,adr,at,val)
 534     , _require_atomic_access(require_atomic_access)
 535   {}
 536   virtual int Opcode() const;
 537   virtual BasicType memory_type() const { return T_LONG; }
 538   bool require_atomic_access() { return _require_atomic_access; }
 539   static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
 540 #ifndef PRODUCT
 541   virtual void dump_spec(outputStream *st) const {
 542     StoreNode::dump_spec(st);
 543     if (_require_atomic_access)  st->print(" Atomic!");
 544   }
 545 #endif
 546 };
 547 
 548 //------------------------------StoreFNode-------------------------------------
 549 // Store float to memory
 550 class StoreFNode : public StoreNode {
 551 public:
 552   StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}

 553   virtual int Opcode() const;
 554   virtual BasicType memory_type() const { return T_FLOAT; }
 555 };
 556 
 557 //------------------------------StoreDNode-------------------------------------
 558 // Store double to memory
 559 class StoreDNode : public StoreNode {
 560 public:
 561   StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}

 562   virtual int Opcode() const;
 563   virtual BasicType memory_type() const { return T_DOUBLE; }
 564 };
 565 
 566 //------------------------------StorePNode-------------------------------------
 567 // Store pointer to memory
 568 class StorePNode : public StoreNode {
 569 public:
 570   StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}

 571   virtual int Opcode() const;
 572   virtual BasicType memory_type() const { return T_ADDRESS; }
 573 };
 574 
 575 //------------------------------StoreNNode-------------------------------------
 576 // Store narrow oop to memory
 577 class StoreNNode : public StoreNode {
 578 public:
 579   StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}

 580   virtual int Opcode() const;
 581   virtual BasicType memory_type() const { return T_NARROWOOP; }
 582 };
 583 
 584 //------------------------------StoreNKlassNode--------------------------------------
 585 // Store narrow klass to memory
 586 class StoreNKlassNode : public StoreNNode {
 587 public:
 588   StoreNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNNode(c,mem,adr,at,val) {}

 589   virtual int Opcode() const;
 590   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 591 };
 592 
 593 //------------------------------StoreCMNode-----------------------------------
 594 // Store card-mark byte to memory for CM
 595 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
 596   // Preceding equivalent StoreCMs may be eliminated.
 597 class StoreCMNode : public StoreNode {
 598  private:
 599   virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
 600   virtual uint cmp( const Node &n ) const {
 601     return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
 602       && StoreNode::cmp(n);
 603   }
 604   virtual uint size_of() const { return sizeof(*this); }
 605   int _oop_alias_idx;   // The alias_idx of OopStore
 606 
 607 public:
 608   StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
 609     StoreNode(c,mem,adr,at,val,oop_store),
 610     _oop_alias_idx(oop_alias_idx) {
 611     assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
 612            _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
 613            "bad oop alias idx");
 614   }
 615   virtual int Opcode() const;
 616   virtual Node *Identity( PhaseTransform *phase );
 617   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 618   virtual const Type *Value( PhaseTransform *phase ) const;
 619   virtual BasicType memory_type() const { return T_VOID; } // unspecific
 620   int oop_alias_idx() const { return _oop_alias_idx; }
 621 };
 622 
 623 //------------------------------LoadPLockedNode---------------------------------
 624 // Load-locked a pointer from memory (either object or array).
 625 // On Sparc & Intel this is implemented as a normal pointer load.
 626 // On PowerPC and friends it's a real load-locked.
 627 class LoadPLockedNode : public LoadPNode {
 628 public:
 629   LoadPLockedNode( Node *c, Node *mem, Node *adr )
 630     : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
 631   virtual int Opcode() const;
 632   virtual int store_Opcode() const { return Op_StorePConditional; }
 633   virtual bool depends_only_on_test() const { return true; }
 634 };
 635 
 636 //------------------------------SCMemProjNode---------------------------------------
 637   // This class defines a projection of the memory state of a store conditional node.
 638 // These nodes return a value, but also update memory.
 639 class SCMemProjNode : public ProjNode {
 640 public:
 641   enum {SCMEMPROJCON = (uint)-2};
 642   SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
 643   virtual int Opcode() const;
 644   virtual bool      is_CFG() const  { return false; }
 645   virtual const Type *bottom_type() const {return Type::MEMORY;}
 646   virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
 647   virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
 648   virtual const Type *Value( PhaseTransform *phase ) const;
 649 #ifndef PRODUCT
 650   virtual void dump_spec(outputStream *st) const {};




 117   }
 118 
 119   // Search through memory states which precede this node (load or store).
 120   // Look for an exact match for the address, with no intervening
 121   // aliased stores.
 122   Node* find_previous_store(PhaseTransform* phase);
 123 
 124   // Can this node (load or store) accurately see a stored value in
 125   // the given memory state?  (The state may or may not be in(Memory).)
 126   Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
 127 
 128 #ifndef PRODUCT
 129   static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
 130   virtual void dump_spec(outputStream *st) const;
 131 #endif
 132 };
 133 
 134 //------------------------------LoadNode---------------------------------------
 135 // Load value; requires Memory and Address
 136 class LoadNode : public MemNode {
 137 public:
 138   typedef enum { unordered = 0, acquire } Sem;
 139 
 140 private:
 141   // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
 142   // loads that can be reordered, and such requiring acquire semantics to
 143   // adhere to the Java specification.  The required behaviour is stored in
 144   // this field.
 145   const Sem _sem;
 146 
 147 protected:
 148   virtual uint cmp(const Node &n) const;
 149   virtual uint size_of() const; // Size is bigger
 150   const Type* const _type;      // What kind of value is loaded?
 151 public:
 152 
 153   LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, Sem sem)
 154     : MemNode(c,mem,adr,at), _type(rt), _sem(sem) {
 155     init_class_id(Class_Load);
 156   }
 157   inline bool is_unordered() const { return !is_acquire(); }
 158   inline bool is_acquire() const {
 159     assert(_sem == unordered || _sem == acquire, "unexpected");
 160     return _sem == acquire;
 161   }
 162 
 163   // Polymorphic factory method:
 164    static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
 165                      const TypePtr* at, const Type *rt, BasicType bt, Sem sem);
 166 
 167   virtual uint hash()   const;  // Check the type
 168 
 169   // Handle algebraic identities here.  If we have an identity, return the Node
 170   // we are equivalent to.  We look for Load of a Store.
 171   virtual Node *Identity( PhaseTransform *phase );
 172 
 173   // If the load is from Field memory and the pointer is non-null, we can
 174   // zero out the control input.
 175   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 176 
 177   // Split instance field load through Phi.
 178   Node* split_through_phi(PhaseGVN *phase);
 179 
 180   // Recover original value from boxed values
 181   Node *eliminate_autobox(PhaseGVN *phase);
 182 
 183   // Compute a new Type for this node.  Basically we just do the pre-check,
 184   // then call the virtual add() to set the type.
 185   virtual const Type *Value( PhaseTransform *phase ) const;


 208 
 209   // Check if the load's memory input is a Phi node with the same control.
 210   bool is_instance_field_load_with_local_phi(Node* ctrl);
 211 
 212 #ifndef PRODUCT
 213   virtual void dump_spec(outputStream *st) const;
 214 #endif
 215 #ifdef ASSERT
 216   // Helper function to allow a raw load without control edge for some cases
 217   static bool is_immutable_value(Node* adr);
 218 #endif
 219 protected:
 220   const Type* load_array_final_field(const TypeKlassPtr *tkls,
 221                                      ciKlass* klass) const;
 222 };
 223 
 224 //------------------------------LoadBNode--------------------------------------
 225 // Load a byte (8bits signed) from memory
 226 class LoadBNode : public LoadNode {
 227 public:
 228   LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, Sem sem)
 229     : LoadNode(c, mem, adr, at, ti, sem) {}
 230   virtual int Opcode() const;
 231   virtual uint ideal_reg() const { return Op_RegI; }
 232   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 233   virtual const Type *Value(PhaseTransform *phase) const;
 234   virtual int store_Opcode() const { return Op_StoreB; }
 235   virtual BasicType memory_type() const { return T_BYTE; }
 236 };
 237 
 238 //------------------------------LoadUBNode-------------------------------------
 239   // Load an unsigned byte (8bits unsigned) from memory
 240 class LoadUBNode : public LoadNode {
 241 public:
 242   LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, Sem sem)
 243     : LoadNode(c, mem, adr, at, ti, sem) {}
 244   virtual int Opcode() const;
 245   virtual uint ideal_reg() const { return Op_RegI; }
 246   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
 247   virtual const Type *Value(PhaseTransform *phase) const;
 248   virtual int store_Opcode() const { return Op_StoreB; }
 249   virtual BasicType memory_type() const { return T_BYTE; }
 250 };
 251 
 252 //------------------------------LoadUSNode-------------------------------------
 253 // Load an unsigned short/char (16bits unsigned) from memory
 254 class LoadUSNode : public LoadNode {
 255 public:
 256   LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, Sem sem)
 257     : LoadNode(c, mem, adr, at, ti, sem) {}
 258   virtual int Opcode() const;
 259   virtual uint ideal_reg() const { return Op_RegI; }
 260   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 261   virtual const Type *Value(PhaseTransform *phase) const;
 262   virtual int store_Opcode() const { return Op_StoreC; }
 263   virtual BasicType memory_type() const { return T_CHAR; }
 264 };
 265 
 266 //------------------------------LoadSNode--------------------------------------
 267 // Load a short (16bits signed) from memory
 268 class LoadSNode : public LoadNode {
 269 public:
 270   LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, Sem sem)
 271     : LoadNode(c, mem, adr, at, ti, sem) {}
 272   virtual int Opcode() const;
 273   virtual uint ideal_reg() const { return Op_RegI; }
 274   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 275   virtual const Type *Value(PhaseTransform *phase) const;
 276   virtual int store_Opcode() const { return Op_StoreC; }
 277   virtual BasicType memory_type() const { return T_SHORT; }
 278 };
 279 
 280 //------------------------------LoadINode--------------------------------------
 281 // Load an integer from memory
 282 class LoadINode : public LoadNode {
 283 public:
 284   LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, Sem sem)
 285     : LoadNode(c, mem, adr, at, ti, sem) {}
 286   virtual int Opcode() const;
 287   virtual uint ideal_reg() const { return Op_RegI; }
 288   virtual int store_Opcode() const { return Op_StoreI; }
 289   virtual BasicType memory_type() const { return T_INT; }
 290 };
 291 
 292 //------------------------------LoadRangeNode----------------------------------
 293 // Load an array length from the array
 294 class LoadRangeNode : public LoadINode {
 295 public:
 296   LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
 297     : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, LoadNode::unordered) {}
 298   virtual int Opcode() const;
 299   virtual const Type *Value( PhaseTransform *phase ) const;
 300   virtual Node *Identity( PhaseTransform *phase );
 301   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 302 };
 303 
 304 //------------------------------LoadLNode--------------------------------------
 305 // Load a long from memory
 306 class LoadLNode : public LoadNode {
 307   virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } // atomicity flag participates in GVN hashing
 308   virtual uint cmp( const Node &n ) const {
 309     return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
 310       && LoadNode::cmp(n);
 311   }
 312   virtual uint size_of() const { return sizeof(*this); } // needed for proper cloning: this class is bigger than LoadNode
 313   const bool _require_atomic_access;  // is piecewise load forbidden?
 314 
 315 public:
 316   LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
 317             bool require_atomic_access, Sem sem) // 'sem' = required memory ordering (see LoadNode::Sem)
 318     : LoadNode(c, mem, adr, at, tl, sem), _require_atomic_access(require_atomic_access) {}



 319   virtual int Opcode() const;
 320   virtual uint ideal_reg() const { return Op_RegL; }
 321   virtual int store_Opcode() const { return Op_StoreL; }
 322   virtual BasicType memory_type() const { return T_LONG; }
 323   bool require_atomic_access() { return _require_atomic_access; }
 324   static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
 325                                 const Type* rt, Sem sem); // factory for a load that must not be split into two 32-bit halves
 326 #ifndef PRODUCT
 327   virtual void dump_spec(outputStream *st) const {
 328     LoadNode::dump_spec(st);
 329     if (_require_atomic_access)  st->print(" Atomic!");
 330   }
 331 #endif
 332 };
 333 
 334 //------------------------------LoadL_unalignedNode----------------------------
 335 // Load a long from unaligned memory
 336 class LoadL_unalignedNode : public LoadLNode {
 337 public:
 338   LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Sem sem)
 339     : LoadLNode(c, mem, adr, at, TypeLong::LONG, false, sem) {} // unaligned => piecewise load allowed (require_atomic_access == false)
 340   virtual int Opcode() const;
 341 };
 342 
 343 //------------------------------LoadFNode--------------------------------------
 344 // Load a float (32 bits) from memory
 345 class LoadFNode : public LoadNode {
 346 public:
 347   LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, Sem sem)
 348     : LoadNode(c, mem, adr, at, t, sem) {} // 'sem' = required memory ordering (see LoadNode::Sem)
 349   virtual int Opcode() const;
 350   virtual uint ideal_reg() const { return Op_RegF; }
 351   virtual int store_Opcode() const { return Op_StoreF; } // opcode of the matching store
 352   virtual BasicType memory_type() const { return T_FLOAT; }
 353 };
 354 
 355 //------------------------------LoadDNode--------------------------------------
 356 // Load a double (64 bits) from memory
 357 class LoadDNode : public LoadNode {
 358 public:
 359   LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, Sem sem)
 360     : LoadNode(c, mem, adr, at, t, sem) {} // 'sem' = required memory ordering (see LoadNode::Sem)
 361   virtual int Opcode() const;
 362   virtual uint ideal_reg() const { return Op_RegD; }
 363   virtual int store_Opcode() const { return Op_StoreD; } // opcode of the matching store
 364   virtual BasicType memory_type() const { return T_DOUBLE; }
 365 };
 366 
 367 //------------------------------LoadD_unalignedNode----------------------------
 368 // Load a double from unaligned memory
 369 class LoadD_unalignedNode : public LoadDNode {
 370 public:
 371   LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Sem sem)
 372     : LoadDNode(c, mem, adr, at, Type::DOUBLE, sem) {} // value type is always DOUBLE for the unaligned variant
 373   virtual int Opcode() const;
 374 };
 375 
 376 //------------------------------LoadPNode--------------------------------------
 377 // Load a pointer from memory (either object or array)
 378 class LoadPNode : public LoadNode {
 379 public:
 380   LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, Sem sem)
 381     : LoadNode(c, mem, adr, at, t, sem) {} // 'sem' = required memory ordering (see LoadNode::Sem)
 382   virtual int Opcode() const;
 383   virtual uint ideal_reg() const { return Op_RegP; }
 384   virtual int store_Opcode() const { return Op_StoreP; } // opcode of the matching store
 385   virtual BasicType memory_type() const { return T_ADDRESS; }
 386   // depends_only_on_test is almost always true, and needs to be almost always
 387   // true to enable key hoisting & commoning optimizations.  However, for the
 388   // special case of RawPtr loads from TLS top & end, the control edge carries
 389   // the dependence preventing hoisting past a Safepoint instead of the memory
 390   // edge.  (An unfortunate consequence of having Safepoints not set Raw
 391   // Memory; itself an unfortunate consequence of having Nodes which produce
 392   // results (new raw memory state) inside of loops preventing all manner of
 393   // other optimizations).  Basically, it's ugly but so is the alternative.
 394   // See comment in macro.cpp, around line 125 expand_allocate_common().
 395   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 396 };
 397 
 398 
 399 //------------------------------LoadNNode--------------------------------------
 400 // Load a narrow oop from memory (either object or array)
 401 class LoadNNode : public LoadNode {
 402 public:
 403   LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, Sem sem)
 404     : LoadNode(c, mem, adr, at, t, sem) {} // 'sem' = required memory ordering (see LoadNode::Sem)
 405   virtual int Opcode() const;
 406   virtual uint ideal_reg() const { return Op_RegN; }
 407   virtual int store_Opcode() const { return Op_StoreN; } // opcode of the matching store
 408   virtual BasicType memory_type() const { return T_NARROWOOP; }
 409   // depends_only_on_test is almost always true, and needs to be almost always
 410   // true to enable key hoisting & commoning optimizations.  However, for the
 411   // special case of RawPtr loads from TLS top & end, the control edge carries
 412   // the dependence preventing hoisting past a Safepoint instead of the memory
 413   // edge.  (An unfortunate consequence of having Safepoints not set Raw
 414   // Memory; itself an unfortunate consequence of having Nodes which produce
 415   // results (new raw memory state) inside of loops preventing all manner of
 416   // other optimizations).  Basically, it's ugly but so is the alternative.
 417   // See comment in macro.cpp, around line 125 expand_allocate_common().
 418   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 419 };
 420 
 421 //------------------------------LoadKlassNode----------------------------------
 422 // Load a Klass from an object
 423 class LoadKlassNode : public LoadPNode {
 424 public:
 425   LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, Sem sem)
 426     : LoadPNode(c, mem, adr, at, tk, sem) {} // 'sem' = required memory ordering (see LoadNode::Sem)
 427   virtual int Opcode() const;
 428   virtual const Type *Value( PhaseTransform *phase ) const;
 429   virtual Node *Identity( PhaseTransform *phase );
 430   virtual bool depends_only_on_test() const { return true; } // an object's klass never changes, so hoisting is always safe
 431 
 432   // Polymorphic factory method:
 433   static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
 434                      const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
 435 };
 436 
 437 //------------------------------LoadNKlassNode---------------------------------
 438 // Load a narrow Klass from an object.
 439 class LoadNKlassNode : public LoadNNode {
 440 public:
 441   LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, Sem sem)
 442     : LoadNNode(c, mem, adr, at, tk, sem) {} // 'sem' = required memory ordering (see LoadNode::Sem)
 443   virtual int Opcode() const;
 444   virtual uint ideal_reg() const { return Op_RegN; }
 445   virtual int store_Opcode() const { return Op_StoreNKlass; } // opcode of the matching store
 446   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 447 
 448   virtual const Type *Value( PhaseTransform *phase ) const;
 449   virtual Node *Identity( PhaseTransform *phase );
 450   virtual bool depends_only_on_test() const { return true; } // an object's klass never changes, so hoisting is always safe
 451 };
 452 
 453 
 454 //------------------------------StoreNode--------------------------------------
 455 // Store value; requires Store, Address and Value
 456 class StoreNode : public MemNode {
 457 public:
 458   typedef enum { unordered = 0, release } Sem; // required memory-ordering of this store
 459 private:
 460   // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
 461   // stores that can be reordered, and such requiring release semantics to
 462   // adhere to the Java specification.  The required behaviour is stored in
 463   // this field.
 464   const Sem _sem;
 465   // Needed for proper cloning.
 466   virtual uint size_of() const { return sizeof(*this); }
 467 protected:
 468   virtual uint cmp( const Node &n ) const;
 469   virtual bool depends_only_on_test() const { return false; } // stores are never hoisted past a dominating test (cf. LoadPNode)
 470 
 471   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);     // Ideal() helper: strip a useless mask of the stored value
 472   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits); // Ideal() helper: strip a useless sign extension
 473 
 474 public:
 475   // We must ensure that stores of object references will be visible
 476   // only after the object's initialization. So the callers of this
 477   // procedure must indicate that the store requires `release'
 478   // semantics, if the stored value is an object reference that might
 479   // point to a new object and may become externally visible.
 480   StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Sem sem)
 481     : MemNode(c, mem, adr, at, val), _sem(sem) {
 482     init_class_id(Class_Store);
 483   }
 484   StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, Sem sem)
 485     : MemNode(c, mem, adr, at, val, oop_store), _sem(sem) {
 486     init_class_id(Class_Store);
 487   }
 488 
 489   inline bool is_unordered() const { return !is_release(); }
 490   inline bool is_release() const {
 491     assert((_sem == unordered || _sem == release), "unexpected");
 492     return _sem == release;
 493   }
 494 
 495   // Conservatively release stores of object references in order to
 496   // ensure visibility of object initialization.
 497   static inline Sem release_if_reference(const BasicType t) {
 498     const Sem s = (t == T_ARRAY ||
 499                    t == T_ADDRESS || // Might be the address of an object reference (`boxing').
 500                    t == T_OBJECT) ? release : unordered;
 501     return s;
 502   }
 503 
 504   // Polymorphic factory method
 505   //
 506   // We must ensure that stores of object references will be visible
 507   // only after the object's initialization. So the callers of this
 508   // procedure must indicate that the store requires `release'
 509   // semantics, if the stored value is an object reference that might
 510   // point to a new object and may become externally visible.
 511   static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
 512                          const TypePtr* at, Node *val, BasicType bt, Sem sem);
 513 
 514   virtual uint hash() const;    // Check the type
 515 
 516   // If the store is to Field memory and the pointer is non-null, we can
 517   // zero out the control input.
 518   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 519 
 520   // Compute a new Type for this node.  Basically we just do the pre-check,
 521   // then call the virtual add() to set the type.
 522   virtual const Type *Value( PhaseTransform *phase ) const;
 523 
 524   // Check for identity function on memory (Load then Store at same address)
 525   virtual Node *Identity( PhaseTransform *phase );
 526 
 527   // Do not match memory edge
 528   virtual uint match_edge(uint idx) const;
 529 
 530   virtual const Type *bottom_type() const;  // returns Type::MEMORY
 531 
 532   // Map a store opcode to its corresponding own opcode, trivially.
 533   virtual int store_Opcode() const { return Opcode(); }
 534 
 535   // have all possible loads of the value stored been optimized away?
 536   bool value_never_loaded(PhaseTransform *phase) const;
 537 };
 538 
 539 //------------------------------StoreBNode-------------------------------------
 540 // Store byte to memory
 541 class StoreBNode : public StoreNode {
 542 public:
 543   StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Sem sem)
 544     : StoreNode(c, mem, adr, at, val, sem) {} // 'sem' = required memory ordering (unordered/release)
 545   virtual int Opcode() const;
 546   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 547   virtual BasicType memory_type() const { return T_BYTE; }
 548 };
 549 
 550 //------------------------------StoreCNode-------------------------------------
 551 // Store char/short to memory
 552 class StoreCNode : public StoreNode {
 553 public:
 554   StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Sem sem)
 555     : StoreNode(c, mem, adr, at, val, sem) {} // 'sem' = required memory ordering (unordered/release)
 556   virtual int Opcode() const;
 557   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 558   virtual BasicType memory_type() const { return T_CHAR; }
 559 };
 560 
 561 //------------------------------StoreINode-------------------------------------
 562 // Store int to memory
 563 class StoreINode : public StoreNode {
 564 public:
 565   StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Sem sem)
 566     : StoreNode(c, mem, adr, at, val, sem) {} // 'sem' = required memory ordering (unordered/release)
 567   virtual int Opcode() const;
 568   virtual BasicType memory_type() const { return T_INT; }
 569 };
 570 
 571 //------------------------------StoreLNode-------------------------------------
 572 // Store long to memory
 573 class StoreLNode : public StoreNode {
 574   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; } // atomicity flag participates in GVN hashing
 575   virtual uint cmp( const Node &n ) const {
 576     return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
 577       && StoreNode::cmp(n);
 578   }
 579   virtual uint size_of() const { return sizeof(*this); } // needed for proper cloning: this class is bigger than StoreNode
 580   const bool _require_atomic_access;  // is piecewise store forbidden?
 581 
 582 public:
 583   StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, bool require_atomic_access, Sem sem)
 584     : StoreNode(c, mem, adr, at, val, sem), _require_atomic_access(require_atomic_access) {}



 585   virtual int Opcode() const;
 586   virtual BasicType memory_type() const { return T_LONG; }
 587   bool require_atomic_access() { return _require_atomic_access; }
 588   static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, Sem sem); // factory for a store that must not be split into two 32-bit halves
 589 #ifndef PRODUCT
 590   virtual void dump_spec(outputStream *st) const {
 591     StoreNode::dump_spec(st);
 592     if (_require_atomic_access)  st->print(" Atomic!");
 593   }
 594 #endif
 595 };
 596 
 597 //------------------------------StoreFNode-------------------------------------
 598 // Store float to memory
 599 class StoreFNode : public StoreNode {
 600 public:
 601   StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Sem sem)
 602     : StoreNode(c, mem, adr, at, val, sem) {} // 'sem' = required memory ordering (unordered/release)
 603   virtual int Opcode() const;
 604   virtual BasicType memory_type() const { return T_FLOAT; }
 605 };
 606 
 607 //------------------------------StoreDNode-------------------------------------
 608 // Store double to memory
 609 class StoreDNode : public StoreNode {
 610 public:
 611   StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Sem sem)
 612     : StoreNode(c, mem, adr, at, val, sem) {} // 'sem' = required memory ordering (unordered/release)
 613   virtual int Opcode() const;
 614   virtual BasicType memory_type() const { return T_DOUBLE; }
 615 };
 616 
 617 //------------------------------StorePNode-------------------------------------
 618 // Store pointer to memory
 619 class StorePNode : public StoreNode {
 620 public:
 621   StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Sem sem)
 622     : StoreNode(c, mem, adr, at, val, sem) {} // 'sem' = required memory ordering (unordered/release)
 623   virtual int Opcode() const;
 624   virtual BasicType memory_type() const { return T_ADDRESS; }
 625 };
 626 
 627 //------------------------------StoreNNode-------------------------------------
 628 // Store narrow oop to memory
 629 class StoreNNode : public StoreNode {
 630 public:
 631   StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Sem sem)
 632     : StoreNode(c, mem, adr, at, val, sem) {} // 'sem' = required memory ordering (unordered/release)
 633   virtual int Opcode() const;
 634   virtual BasicType memory_type() const { return T_NARROWOOP; }
 635 };
 636 
 637 //------------------------------StoreNKlassNode--------------------------------------
 638 // Store narrow klass to memory
 639 class StoreNKlassNode : public StoreNNode {
 640 public:
 641   StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Sem sem)
 642     : StoreNNode(c, mem, adr, at, val, sem) {} // 'sem' = required memory ordering (unordered/release)
 643   virtual int Opcode() const;
 644   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 645 };
 646 
 647 //------------------------------StoreCMNode-----------------------------------
 648 // Store card-mark byte to memory for CM
 649 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
 650 // Preceding equivalent StoreCMs may be eliminated.
 651 class StoreCMNode : public StoreNode {
 652  private:
 653   virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; } // alias idx of the guarded oop store participates in GVN hashing
 654   virtual uint cmp( const Node &n ) const {
 655     return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
 656       && StoreNode::cmp(n);
 657   }
 658   virtual uint size_of() const { return sizeof(*this); }
 659   int _oop_alias_idx;   // The alias_idx of OopStore
 660 
 661 public:
 662   StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
 663     StoreNode(c, mem, adr, at, val, oop_store, StoreNode::release), // always release: card mark must occur after its oop store (see class comment)
 664     _oop_alias_idx(oop_alias_idx) {
 665     assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
 666            _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
 667            "bad oop alias idx");
 668   }
 669   virtual int Opcode() const;
 670   virtual Node *Identity( PhaseTransform *phase );
 671   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 672   virtual const Type *Value( PhaseTransform *phase ) const;
 673   virtual BasicType memory_type() const { return T_VOID; } // unspecific
 674   int oop_alias_idx() const { return _oop_alias_idx; }
 675 };
 676 
 677 //------------------------------LoadPLockedNode---------------------------------
 678 // Load-locked a pointer from memory (either object or array).
 679 // On Sparc & Intel this is implemented as a normal pointer load.
 680 // On PowerPC and friends it's a real load-locked.
 681 class LoadPLockedNode : public LoadPNode {
 682 public:
 683   LoadPLockedNode(Node *c, Node *mem, Node *adr, Sem sem)
 684     : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, sem) {} // raw-pointer load; 'sem' = required memory ordering
 685   virtual int Opcode() const;
 686   virtual int store_Opcode() const { return Op_StorePConditional; } // paired conditional store opcode
 687   virtual bool depends_only_on_test() const { return true; }
 688 };
 689 
 690 //------------------------------SCMemProjNode---------------------------------------
 691 // This class defines a projection of the memory  state of a store conditional node.
 692 // These nodes return a value, but also update memory.
 693 class SCMemProjNode : public ProjNode {
 694 public:
 695   enum {SCMEMPROJCON = (uint)-2};
 696   SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
 697   virtual int Opcode() const;
 698   virtual bool      is_CFG() const  { return false; }
 699   virtual const Type *bottom_type() const {return Type::MEMORY;}
 700   virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
 701   virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
 702   virtual const Type *Value( PhaseTransform *phase ) const;
 703 #ifndef PRODUCT
 704   virtual void dump_spec(outputStream *st) const {};