1 /*
   2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OPTO_MEMNODE_HPP
  26 #define SHARE_VM_OPTO_MEMNODE_HPP
  27 
  28 #include "opto/multnode.hpp"
  29 #include "opto/node.hpp"
  30 #include "opto/opcodes.hpp"
  31 #include "opto/type.hpp"
  32 
  33 // Portions of code courtesy of Clifford Click
  34 
  35 class MultiNode;
  36 class PhaseCCP;
  37 class PhaseTransform;
  38 
  39 //------------------------------MemNode----------------------------------------
  40 // Load or Store, possibly throwing a NULL pointer exception
  41 class MemNode : public Node {
  42 protected:
  43 #ifdef ASSERT
  44   const TypePtr* _adr_type;     // What kind of memory is being addressed?
  45 #endif
  46   virtual uint size_of() const; // Size is bigger (ASSERT only)
  47 public:
  48   enum { Control,               // When is it safe to do this load?
  49          Memory,                // Chunk of memory is being loaded from
         Address,               // The actual address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  53   };
  54 protected:
  55   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
  56     : Node(c0,c1,c2   ) {
  57     init_class_id(Class_Mem);
  58     debug_only(_adr_type=at; adr_type();)
  59   }
  60   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
  61     : Node(c0,c1,c2,c3) {
  62     init_class_id(Class_Mem);
  63     debug_only(_adr_type=at; adr_type();)
  64   }
  65   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
  66     : Node(c0,c1,c2,c3,c4) {
  67     init_class_id(Class_Mem);
  68     debug_only(_adr_type=at; adr_type();)
  69   }
  70 
  71 public:
  72   // Helpers for the optimizer.  Documented in memnode.cpp.
  73   static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
  74                                       Node* p2, AllocateNode* a2,
  75                                       PhaseTransform* phase);
  76   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);
  77 
  78   static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  79   static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  80   // This one should probably be a phase-specific function:
  81   static bool all_controls_dominate(Node* dom, Node* sub);
  82 
  83   // Find any cast-away of null-ness and keep its control.
  84   static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  85   virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );
  86 
  87   virtual const class TypePtr *adr_type() const;  // returns bottom_type of address
  88 
  89   // Shared code for Ideal methods:
  90   Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.
  91 
  92   // Helper function for adr_type() implementations.
  93   static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);
  94 
  // Raw access function, to allow copying of adr_type efficiently in
  // product builds while retaining the debug info for debug builds.
  97   const TypePtr *raw_adr_type() const {
  98 #ifdef ASSERT
  99     return _adr_type;
 100 #else
 101     return 0;
 102 #endif
 103   }
 104 
 105   // Map a load or store opcode to its corresponding store opcode.
 106   // (Return -1 if unknown.)
 107   virtual int store_Opcode() const { return -1; }
 108 
  // What is the type of the value in memory?  (T_VOID means "unspecified".)
 110   virtual BasicType memory_type() const = 0;
 111   virtual int memory_size() const {
 112 #ifdef ASSERT
 113     return type2aelembytes(memory_type(), true);
 114 #else
 115     return type2aelembytes(memory_type());
 116 #endif
 117   }
 118 
 119   // Search through memory states which precede this node (load or store).
 120   // Look for an exact match for the address, with no intervening
 121   // aliased stores.
 122   Node* find_previous_store(PhaseTransform* phase);
 123 
 124   // Can this node (load or store) accurately see a stored value in
 125   // the given memory state?  (The state may or may not be in(Memory).)
 126   Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
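  // A minimal illustrative sketch (not from this file) of the usual pattern:
  // an Identity() implementation asks whether the memory state it depends on
  // already holds the value being loaded.  The local names are assumptions.
  //
  //   Node* mem   = in(Memory);
  //   Node* value = can_see_stored_value(mem, phase);
  //   if (value != NULL)  return value;   // the load folds to the stored value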
 127 
 128 #ifndef PRODUCT
 129   static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
 130   virtual void dump_spec(outputStream *st) const;
 131 #endif
 132 };
 133 
 134 //------------------------------LoadNode---------------------------------------
 135 // Load value; requires Memory and Address
 136 class LoadNode : public MemNode {
 137 protected:
 138   virtual uint cmp( const Node &n ) const;
 139   virtual uint size_of() const; // Size is bigger
 140   const Type* const _type;      // What kind of value is loaded?
 141 public:
 142 
 143   LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
 144     : MemNode(c,mem,adr,at), _type(rt) {
 145     init_class_id(Class_Load);
 146   }
 147 
 148   // Polymorphic factory method:
 149   static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
 150                      const TypePtr* at, const Type *rt, BasicType bt );
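  // Illustrative sketch only (not from this file): a client such as a parser
  // or intrinsic expander picks the concrete LoadXNode via the factory above
  // and then lets GVN transform it.  'gvn', 'ctl', 'mem', 'adr' and 'adr_type'
  // are assumed to be supplied by the caller.
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT);
  //   ld = gvn.transform(ld);   // may fold against a dominating store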
 151 
 152   virtual uint hash()   const;  // Check the type
 153 
 154   // Handle algebraic identities here.  If we have an identity, return the Node
 155   // we are equivalent to.  We look for Load of a Store.
 156   virtual Node *Identity( PhaseTransform *phase );
 157 
 158   // If the load is from Field memory and the pointer is non-null, we can
 159   // zero out the control input.
 160   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 161 
 162   // Split instance field load through Phi.
 163   Node* split_through_phi(PhaseGVN *phase);
 164 
 165   // Recover original value from boxed values
 166   Node *eliminate_autobox(PhaseGVN *phase);
 167 
 168   // Compute a new Type for this node.  Basically we just do the pre-check,
 169   // then call the virtual add() to set the type.
 170   virtual const Type *Value( PhaseTransform *phase ) const;
 171 
 172   // Common methods for LoadKlass and LoadNKlass nodes.
 173   const Type *klass_value_common( PhaseTransform *phase ) const;
 174   Node *klass_identity_common( PhaseTransform *phase );
 175 
 176   virtual uint ideal_reg() const;
 177   virtual const Type *bottom_type() const;
 178   // Following method is copied from TypeNode:
 179   void set_type(const Type* t) {
 180     assert(t != NULL, "sanity");
 181     debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
 182     *(const Type**)&_type = t;   // cast away const-ness
 183     // If this node is in the hash table, make sure it doesn't need a rehash.
 184     assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
 185   }
 186   const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
 187 
 188   // Do not match memory edge
 189   virtual uint match_edge(uint idx) const;
 190 
 191   // Map a load opcode to its corresponding store opcode.
 192   virtual int store_Opcode() const = 0;
 193 
 194   // Check if the load's memory input is a Phi node with the same control.
 195   bool is_instance_field_load_with_local_phi(Node* ctrl);
 196 
 197 #ifndef PRODUCT
 198   virtual void dump_spec(outputStream *st) const;
 199 #endif
 200 #ifdef ASSERT
 201   // Helper function to allow a raw load without control edge for some cases
 202   static bool is_immutable_value(Node* adr);
 203 #endif
 204 protected:
 205   const Type* load_array_final_field(const TypeKlassPtr *tkls,
 206                                      ciKlass* klass) const;
 207   // depends_only_on_test is almost always true, and needs to be almost always
 208   // true to enable key hoisting & commoning optimizations.  However, for the
 209   // special case of RawPtr loads from TLS top & end, and other loads performed by
 210   // GC barriers, the control edge carries the dependence preventing hoisting past
 211   // a Safepoint instead of the memory edge.  (An unfortunate consequence of having
 212   // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
 213   // which produce results (new raw memory state) inside of loops preventing all
 214   // manner of other optimizations).  Basically, it's ugly but so is the alternative.
 215   // See comment in macro.cpp, around line 125 expand_allocate_common().
 216   virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 217 
 218 };
 219 
 220 //------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
 222 class LoadBNode : public LoadNode {
 223 public:
 224   LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
 225     : LoadNode(c,mem,adr,at,ti) {}
 226   virtual int Opcode() const;
 227   virtual uint ideal_reg() const { return Op_RegI; }
 228   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 229   virtual const Type *Value(PhaseTransform *phase) const;
 230   virtual int store_Opcode() const { return Op_StoreB; }
 231   virtual BasicType memory_type() const { return T_BYTE; }
 232 };
 233 
 234 //------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
 236 class LoadUBNode : public LoadNode {
 237 public:
 238   LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
 239     : LoadNode(c, mem, adr, at, ti) {}
 240   virtual int Opcode() const;
 241   virtual uint ideal_reg() const { return Op_RegI; }
 242   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
 243   virtual const Type *Value(PhaseTransform *phase) const;
 244   virtual int store_Opcode() const { return Op_StoreB; }
 245   virtual BasicType memory_type() const { return T_BYTE; }
 246 };
 247 
 248 //------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
 250 class LoadUSNode : public LoadNode {
 251 public:
 252   LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
 253     : LoadNode(c,mem,adr,at,ti) {}
 254   virtual int Opcode() const;
 255   virtual uint ideal_reg() const { return Op_RegI; }
 256   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 257   virtual const Type *Value(PhaseTransform *phase) const;
 258   virtual int store_Opcode() const { return Op_StoreC; }
 259   virtual BasicType memory_type() const { return T_CHAR; }
 260 };
 261 
 262 //------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
 264 class LoadSNode : public LoadNode {
 265 public:
 266   LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
 267     : LoadNode(c,mem,adr,at,ti) {}
 268   virtual int Opcode() const;
 269   virtual uint ideal_reg() const { return Op_RegI; }
 270   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 271   virtual const Type *Value(PhaseTransform *phase) const;
 272   virtual int store_Opcode() const { return Op_StoreC; }
 273   virtual BasicType memory_type() const { return T_SHORT; }
 274 };
 275 
 276 //------------------------------LoadINode--------------------------------------
 277 // Load an integer from memory
 278 class LoadINode : public LoadNode {
 279 public:
 280   LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
 281     : LoadNode(c,mem,adr,at,ti) {}
 282   virtual int Opcode() const;
 283   virtual uint ideal_reg() const { return Op_RegI; }
 284   virtual int store_Opcode() const { return Op_StoreI; }
 285   virtual BasicType memory_type() const { return T_INT; }
 286 };
 287 
 288 //------------------------------LoadRangeNode----------------------------------
 289 // Load an array length from the array
 290 class LoadRangeNode : public LoadINode {
 291 public:
 292   LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
 293     : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
 294   virtual int Opcode() const;
 295   virtual const Type *Value( PhaseTransform *phase ) const;
 296   virtual Node *Identity( PhaseTransform *phase );
 297   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 298 };
 299 
 300 //------------------------------LoadLNode--------------------------------------
 301 // Load a long from memory
 302 class LoadLNode : public LoadNode {
 303   virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
 304   virtual uint cmp( const Node &n ) const {
 305     return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
 306       && LoadNode::cmp(n);
 307   }
 308   virtual uint size_of() const { return sizeof(*this); }
 309   const bool _require_atomic_access;  // is piecewise load forbidden?
 310 
 311 public:
 312   LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
 313              const TypeLong *tl = TypeLong::LONG,
 314              bool require_atomic_access = false )
 315     : LoadNode(c,mem,adr,at,tl)
 316     , _require_atomic_access(require_atomic_access)
 317   {}
 318   virtual int Opcode() const;
 319   virtual uint ideal_reg() const { return Op_RegL; }
 320   virtual int store_Opcode() const { return Op_StoreL; }
 321   virtual BasicType memory_type() const { return T_LONG; }
 322   bool require_atomic_access() { return _require_atomic_access; }
 323   static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
 324 #ifndef PRODUCT
 325   virtual void dump_spec(outputStream *st) const {
 326     LoadNode::dump_spec(st);
 327     if (_require_atomic_access)  st->print(" Atomic!");
 328   }
 329 #endif
 330 };
 331 
 332 //------------------------------LoadL_unalignedNode----------------------------
 333 // Load a long from unaligned memory
 334 class LoadL_unalignedNode : public LoadLNode {
 335 public:
 336   LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
 337     : LoadLNode(c,mem,adr,at) {}
 338   virtual int Opcode() const;
 339 };
 340 
 341 //------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
 343 class LoadFNode : public LoadNode {
 344 public:
 345   LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
 346     : LoadNode(c,mem,adr,at,t) {}
 347   virtual int Opcode() const;
 348   virtual uint ideal_reg() const { return Op_RegF; }
 349   virtual int store_Opcode() const { return Op_StoreF; }
 350   virtual BasicType memory_type() const { return T_FLOAT; }
 351 };
 352 
 353 //------------------------------LoadDNode--------------------------------------
 354 // Load a double (64 bits) from memory
 355 class LoadDNode : public LoadNode {
 356 public:
 357   LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
 358     : LoadNode(c,mem,adr,at,t) {}
 359   virtual int Opcode() const;
 360   virtual uint ideal_reg() const { return Op_RegD; }
 361   virtual int store_Opcode() const { return Op_StoreD; }
 362   virtual BasicType memory_type() const { return T_DOUBLE; }
 363 };
 364 
 365 //------------------------------LoadD_unalignedNode----------------------------
 366 // Load a double from unaligned memory
 367 class LoadD_unalignedNode : public LoadDNode {
 368 public:
 369   LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
 370     : LoadDNode(c,mem,adr,at) {}
 371   virtual int Opcode() const;
 372 };
 373 
 374 //------------------------------LoadPNode--------------------------------------
 375 // Load a pointer from memory (either object or array)
 376 class LoadPNode : public LoadNode {
 377 public:
 378   LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
 379     : LoadNode(c,mem,adr,at,t) {}
 380   virtual int Opcode() const;
 381   virtual uint ideal_reg() const { return Op_RegP; }
 382   virtual int store_Opcode() const { return Op_StoreP; }
 383   virtual BasicType memory_type() const { return T_ADDRESS; }
 384 };
 385 
 386 
 387 //------------------------------LoadNNode--------------------------------------
 388 // Load a narrow oop from memory (either object or array)
 389 class LoadNNode : public LoadNode {
 390 public:
 391   LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
 392     : LoadNode(c,mem,adr,at,t) {}
 393   virtual int Opcode() const;
 394   virtual uint ideal_reg() const { return Op_RegN; }
 395   virtual int store_Opcode() const { return Op_StoreN; }
 396   virtual BasicType memory_type() const { return T_NARROWOOP; }
 397 };
 398 
 399 //------------------------------LoadKlassNode----------------------------------
 400 // Load a Klass from an object
 401 class LoadKlassNode : public LoadPNode {
 402 public:
 403   LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
 404     : LoadPNode(c,mem,adr,at,tk) {}
 405   virtual int Opcode() const;
 406   virtual const Type *Value( PhaseTransform *phase ) const;
 407   virtual Node *Identity( PhaseTransform *phase );
 408   virtual bool depends_only_on_test() const { return true; }
 409 
 410   // Polymorphic factory method:
 411   static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
 412                      const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
 413 };
 414 
 415 //------------------------------LoadNKlassNode---------------------------------
 416 // Load a narrow Klass from an object.
 417 class LoadNKlassNode : public LoadNNode {
 418 public:
 419   LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk )
 420     : LoadNNode(c,mem,adr,at,tk) {}
 421   virtual int Opcode() const;
 422   virtual uint ideal_reg() const { return Op_RegN; }
 423   virtual int store_Opcode() const { return Op_StoreNKlass; }
 424   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 425 
 426   virtual const Type *Value( PhaseTransform *phase ) const;
 427   virtual Node *Identity( PhaseTransform *phase );
 428   virtual bool depends_only_on_test() const { return true; }
 429 };
 430 
 431 
 432 //------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
 434 class StoreNode : public MemNode {
 435 protected:
 436   virtual uint cmp( const Node &n ) const;
 437   virtual bool depends_only_on_test() const { return false; }
 438 
 439   Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
 440   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
 441 
 442 public:
 443   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
 444     : MemNode(c,mem,adr,at,val) {
 445     init_class_id(Class_Store);
 446   }
 447   StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
 448     : MemNode(c,mem,adr,at,val,oop_store) {
 449     init_class_id(Class_Store);
 450   }
 451 
 452   // Polymorphic factory method:
 453   static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
 454                           const TypePtr* at, Node *val, BasicType bt );
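  // Illustrative sketch only (not from this file), mirroring the LoadNode
  // factory; the resulting node is the new memory state for its alias slice.
  // 'gvn', 'ctl', 'mem', 'adr', 'adr_type' and 'val' are caller-supplied here.
  //
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT);
  //   Node* new_mem = gvn.transform(st);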
 455 
 456   virtual uint hash() const;    // Check the type
 457 
 458   // If the store is to Field memory and the pointer is non-null, we can
 459   // zero out the control input.
 460   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 461 
 462   // Compute a new Type for this node.  Basically we just do the pre-check,
 463   // then call the virtual add() to set the type.
 464   virtual const Type *Value( PhaseTransform *phase ) const;
 465 
 466   // Check for identity function on memory (Load then Store at same address)
 467   virtual Node *Identity( PhaseTransform *phase );
 468 
 469   // Do not match memory edge
 470   virtual uint match_edge(uint idx) const;
 471 
 472   virtual const Type *bottom_type() const;  // returns Type::MEMORY
 473 
 474   // Map a store opcode to its corresponding own opcode, trivially.
 475   virtual int store_Opcode() const { return Opcode(); }
 476 
 477   // have all possible loads of the value stored been optimized away?
 478   bool value_never_loaded(PhaseTransform *phase) const;
 479 };
 480 
 481 //------------------------------StoreBNode-------------------------------------
 482 // Store byte to memory
 483 class StoreBNode : public StoreNode {
 484 public:
 485   StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
 486   virtual int Opcode() const;
 487   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 488   virtual BasicType memory_type() const { return T_BYTE; }
 489 };
 490 
 491 //------------------------------StoreCNode-------------------------------------
 492 // Store char/short to memory
 493 class StoreCNode : public StoreNode {
 494 public:
 495   StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
 496   virtual int Opcode() const;
 497   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 498   virtual BasicType memory_type() const { return T_CHAR; }
 499 };
 500 
 501 //------------------------------StoreINode-------------------------------------
 502 // Store int to memory
 503 class StoreINode : public StoreNode {
 504 public:
 505   StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
 506   virtual int Opcode() const;
 507   virtual BasicType memory_type() const { return T_INT; }
 508 };
 509 
 510 //------------------------------StoreLNode-------------------------------------
 511 // Store long to memory
 512 class StoreLNode : public StoreNode {
 513   virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
 514   virtual uint cmp( const Node &n ) const {
 515     return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
 516       && StoreNode::cmp(n);
 517   }
 518   virtual uint size_of() const { return sizeof(*this); }
 519   const bool _require_atomic_access;  // is piecewise store forbidden?
 520 
 521 public:
 522   StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
 523               bool require_atomic_access = false )
 524     : StoreNode(c,mem,adr,at,val)
 525     , _require_atomic_access(require_atomic_access)
 526   {}
 527   virtual int Opcode() const;
 528   virtual BasicType memory_type() const { return T_LONG; }
 529   bool require_atomic_access() { return _require_atomic_access; }
 530   static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
 531 #ifndef PRODUCT
 532   virtual void dump_spec(outputStream *st) const {
 533     StoreNode::dump_spec(st);
 534     if (_require_atomic_access)  st->print(" Atomic!");
 535   }
 536 #endif
 537 };
 538 
 539 //------------------------------StoreFNode-------------------------------------
 540 // Store float to memory
 541 class StoreFNode : public StoreNode {
 542 public:
 543   StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
 544   virtual int Opcode() const;
 545   virtual BasicType memory_type() const { return T_FLOAT; }
 546 };
 547 
 548 //------------------------------StoreDNode-------------------------------------
 549 // Store double to memory
 550 class StoreDNode : public StoreNode {
 551 public:
 552   StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
 553   virtual int Opcode() const;
 554   virtual BasicType memory_type() const { return T_DOUBLE; }
 555 };
 556 
 557 //------------------------------StorePNode-------------------------------------
 558 // Store pointer to memory
 559 class StorePNode : public StoreNode {
 560 public:
 561   StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
 562   virtual int Opcode() const;
 563   virtual BasicType memory_type() const { return T_ADDRESS; }
 564 };
 565 
 566 //------------------------------StoreNNode-------------------------------------
 567 // Store narrow oop to memory
 568 class StoreNNode : public StoreNode {
 569 public:
 570   StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
 571   virtual int Opcode() const;
 572   virtual BasicType memory_type() const { return T_NARROWOOP; }
 573 };
 574 
 575 //------------------------------StoreNKlassNode--------------------------------------
 576 // Store narrow klass to memory
 577 class StoreNKlassNode : public StoreNNode {
 578 public:
 579   StoreNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNNode(c,mem,adr,at,val) {}
 580   virtual int Opcode() const;
 581   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 582 };
 583 
 584 //------------------------------StoreCMNode-----------------------------------
 585 // Store card-mark byte to memory for CM
 586 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
 588 class StoreCMNode : public StoreNode {
 589  private:
 590   virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
 591   virtual uint cmp( const Node &n ) const {
 592     return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
 593       && StoreNode::cmp(n);
 594   }
 595   virtual uint size_of() const { return sizeof(*this); }
 596   int _oop_alias_idx;   // The alias_idx of OopStore
 597 
 598 public:
 599   StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
 600     StoreNode(c,mem,adr,at,val,oop_store),
 601     _oop_alias_idx(oop_alias_idx) {
 602     assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
 603            _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
 604            "bad oop alias idx");
 605   }
 606   virtual int Opcode() const;
 607   virtual Node *Identity( PhaseTransform *phase );
 608   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 609   virtual const Type *Value( PhaseTransform *phase ) const;
 610   virtual BasicType memory_type() const { return T_VOID; } // unspecific
 611   int oop_alias_idx() const { return _oop_alias_idx; }
 612 };
 613 
 614 //------------------------------LoadPLockedNode---------------------------------
 615 // Load-locked a pointer from memory (either object or array).
 616 // On Sparc & Intel this is implemented as a normal pointer load.
 617 // On PowerPC and friends it's a real load-locked.
 618 class LoadPLockedNode : public LoadPNode {
 619 public:
 620   LoadPLockedNode( Node *c, Node *mem, Node *adr )
 621     : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
 622   virtual int Opcode() const;
 623   virtual int store_Opcode() const { return Op_StorePConditional; }
 624   virtual bool depends_only_on_test() const { return true; }
 625 };
 626 
 627 //------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
 629 // These nodes return a value, but also update memory.
 630 class SCMemProjNode : public ProjNode {
 631 public:
 632   enum {SCMEMPROJCON = (uint)-2};
 633   SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
 634   virtual int Opcode() const;
 635   virtual bool      is_CFG() const  { return false; }
 636   virtual const Type *bottom_type() const {return Type::MEMORY;}
 637   virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
 638   virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
 639   virtual const Type *Value( PhaseTransform *phase ) const;
 640 #ifndef PRODUCT
 641   virtual void dump_spec(outputStream *st) const {};
 642 #endif
 643 };
 644 
 645 //------------------------------LoadStoreNode---------------------------
 646 // Note: is_Mem() method returns 'true' for this class.
 647 class LoadStoreNode : public Node {
 648 private:
 649   const Type* const _type;      // What kind of value is loaded?
 650   const TypePtr* _adr_type;     // What kind of memory is being addressed?
 651   virtual uint size_of() const; // Size is bigger
 652 public:
 653   LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
 654   virtual bool depends_only_on_test() const { return false; }
 655   virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
 656 
 657   virtual const Type *bottom_type() const { return _type; }
 658   virtual uint ideal_reg() const;
 659   virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address
 660 
 661   bool result_not_used() const;
 662 };
 663 
 664 class LoadStoreConditionalNode : public LoadStoreNode {
 665 public:
 666   enum {
 667     ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
 668   };
 669   LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
 670 };
 671 
 672 //------------------------------StorePConditionalNode---------------------------
 673 // Conditionally store pointer to memory, if no change since prior
 674 // load-locked.  Sets flags for success or failure of the store.
 675 class StorePConditionalNode : public LoadStoreConditionalNode {
 676 public:
 677   StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
 678   virtual int Opcode() const;
 679   // Produces flags
 680   virtual uint ideal_reg() const { return Op_RegFlags; }
 681 };
 682 
 683 //------------------------------StoreIConditionalNode---------------------------
 684 // Conditionally store int to memory, if no change since prior
 685 // load-locked.  Sets flags for success or failure of the store.
 686 class StoreIConditionalNode : public LoadStoreConditionalNode {
 687 public:
 688   StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
 689   virtual int Opcode() const;
 690   // Produces flags
 691   virtual uint ideal_reg() const { return Op_RegFlags; }
 692 };
 693 
 694 //------------------------------StoreLConditionalNode---------------------------
 695 // Conditionally store long to memory, if no change since prior
 696 // load-locked.  Sets flags for success or failure of the store.
 697 class StoreLConditionalNode : public LoadStoreConditionalNode {
 698 public:
 699   StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
 700   virtual int Opcode() const;
 701   // Produces flags
 702   virtual uint ideal_reg() const { return Op_RegFlags; }
 703 };
 704 
 705 
 706 //------------------------------CompareAndSwapLNode---------------------------
 707 class CompareAndSwapLNode : public LoadStoreConditionalNode {
 708 public:
 709   CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
 710   virtual int Opcode() const;
 711 };
 712 
 713 
 714 //------------------------------CompareAndSwapINode---------------------------
 715 class CompareAndSwapINode : public LoadStoreConditionalNode {
 716 public:
 717   CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
 718   virtual int Opcode() const;
 719 };
 720 
 721 
 722 //------------------------------CompareAndSwapPNode---------------------------
 723 class CompareAndSwapPNode : public LoadStoreConditionalNode {
 724 public:
 725   CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
 726   virtual int Opcode() const;
 727 };
 728 
 729 //------------------------------CompareAndSwapNNode---------------------------
 730 class CompareAndSwapNNode : public LoadStoreConditionalNode {
 731 public:
 732   CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
 733   virtual int Opcode() const;
 734 };
 735 
 736 //------------------------------GetAndAddINode---------------------------
 737 class GetAndAddINode : public LoadStoreNode {
 738 public:
 739   GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
 740   virtual int Opcode() const;
 741 };
 742 
 743 //------------------------------GetAndAddLNode---------------------------
 744 class GetAndAddLNode : public LoadStoreNode {
 745 public:
 746   GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
 747   virtual int Opcode() const;
 748 };
 749 
 750 
 751 //------------------------------GetAndSetINode---------------------------
 752 class GetAndSetINode : public LoadStoreNode {
 753 public:
 754   GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
 755   virtual int Opcode() const;
 756 };
 757 
//------------------------------GetAndSetLNode---------------------------
 759 class GetAndSetLNode : public LoadStoreNode {
 760 public:
 761   GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
 762   virtual int Opcode() const;
 763 };
 764 
 765 //------------------------------GetAndSetPNode---------------------------
 766 class GetAndSetPNode : public LoadStoreNode {
 767 public:
 768   GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
 769   virtual int Opcode() const;
 770 };
 771 
 772 //------------------------------GetAndSetNNode---------------------------
 773 class GetAndSetNNode : public LoadStoreNode {
 774 public:
 775   GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
 776   virtual int Opcode() const;
 777 };
 778 
 779 //------------------------------ClearArray-------------------------------------
 780 class ClearArrayNode: public Node {
 781 public:
 782   ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
 783     : Node(ctrl,arymem,word_cnt,base) {
 784     init_class_id(Class_ClearArray);
 785   }
 786   virtual int         Opcode() const;
 787   virtual const Type *bottom_type() const { return Type::MEMORY; }
 788   // ClearArray modifies array elements, and so affects only the
 789   // array memory addressed by the bottom_type of its base address.
 790   virtual const class TypePtr *adr_type() const;
 791   virtual Node *Identity( PhaseTransform *phase );
 792   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 793   virtual uint match_edge(uint idx) const;
 794 
 795   // Clear the given area of an object or array.
 796   // The start offset must always be aligned mod BytesPerInt.
 797   // The end offset must always be aligned mod BytesPerLong.
 798   // Return the new memory.
 799   static Node* clear_memory(Node* control, Node* mem, Node* dest,
 800                             intptr_t start_offset,
 801                             intptr_t end_offset,
 802                             PhaseGVN* phase);
 803   static Node* clear_memory(Node* control, Node* mem, Node* dest,
 804                             intptr_t start_offset,
 805                             Node* end_offset,
 806                             PhaseGVN* phase);
 807   static Node* clear_memory(Node* control, Node* mem, Node* dest,
 808                             Node* start_offset,
 809                             Node* end_offset,
 810                             PhaseGVN* phase);
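  // Illustrative sketch only (not from this file): zeroing the body of a
  // freshly allocated array during macro expansion.  'ctl', 'mem', 'dest',
  // 'header_size' and 'size_in_bytes' are assumptions standing in for the
  // values the expander would supply.
  //
  //   mem = ClearArrayNode::clear_memory(ctl, mem, dest,
  //                                      header_size, size_in_bytes, &gvn);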
  // Return allocation input memory edge if it is a different instance,
  // or itself if it is the one we are looking for.
 813   static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
 814 };
 815 
 816 //------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
 818 class StrIntrinsicNode: public Node {
 819 public:
 820   StrIntrinsicNode(Node* control, Node* char_array_mem,
 821                    Node* s1, Node* c1, Node* s2, Node* c2):
 822     Node(control, char_array_mem, s1, c1, s2, c2) {
 823   }
 824 
 825   StrIntrinsicNode(Node* control, Node* char_array_mem,
 826                    Node* s1, Node* s2, Node* c):
 827     Node(control, char_array_mem, s1, s2, c) {
 828   }
 829 
 830   StrIntrinsicNode(Node* control, Node* char_array_mem,
 831                    Node* s1, Node* s2):
 832     Node(control, char_array_mem, s1, s2) {
 833   }
 834 
 835   virtual bool depends_only_on_test() const { return false; }
 836   virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
 837   virtual uint match_edge(uint idx) const;
 838   virtual uint ideal_reg() const { return Op_RegI; }
 839   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 840   virtual const Type *Value(PhaseTransform *phase) const;
 841 };
 842 
 843 //------------------------------StrComp-------------------------------------
 844 class StrCompNode: public StrIntrinsicNode {
 845 public:
 846   StrCompNode(Node* control, Node* char_array_mem,
 847               Node* s1, Node* c1, Node* s2, Node* c2):
 848     StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
 849   virtual int Opcode() const;
 850   virtual const Type* bottom_type() const { return TypeInt::INT; }
 851 };
 852 
 853 //------------------------------StrEquals-------------------------------------
 854 class StrEqualsNode: public StrIntrinsicNode {
 855 public:
 856   StrEqualsNode(Node* control, Node* char_array_mem,
 857                 Node* s1, Node* s2, Node* c):
 858     StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
 859   virtual int Opcode() const;
 860   virtual const Type* bottom_type() const { return TypeInt::BOOL; }
 861 };
 862 
 863 //------------------------------StrIndexOf-------------------------------------
 864 class StrIndexOfNode: public StrIntrinsicNode {
 865 public:
 866   StrIndexOfNode(Node* control, Node* char_array_mem,
 867               Node* s1, Node* c1, Node* s2, Node* c2):
 868     StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
 869   virtual int Opcode() const;
 870   virtual const Type* bottom_type() const { return TypeInt::INT; }
 871 };
 872 
 873 //------------------------------AryEq---------------------------------------
 874 class AryEqNode: public StrIntrinsicNode {
 875 public:
 876   AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
 877     StrIntrinsicNode(control, char_array_mem, s1, s2) {};
 878   virtual int Opcode() const;
 879   virtual const Type* bottom_type() const { return TypeInt::BOOL; }
 880 };
 881 
 882 
 883 //------------------------------EncodeISOArray--------------------------------
 884 // encode char[] to byte[] in ISO_8859_1
 885 class EncodeISOArrayNode: public Node {
 886 public:
 887   EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
 888   virtual int Opcode() const;
 889   virtual bool depends_only_on_test() const { return false; }
 890   virtual const Type* bottom_type() const { return TypeInt::INT; }
 891   virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
 892   virtual uint match_edge(uint idx) const;
 893   virtual uint ideal_reg() const { return Op_RegI; }
 894   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 895   virtual const Type *Value(PhaseTransform *phase) const;
 896 };
 897 
 898 //------------------------------MemBar-----------------------------------------
 899 // There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
 901 // can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
 902 // volatile-load.  Monitor-exit and volatile-store act as Release: no
 903 // preceding ref can be moved to after them.  We insert a MemBar-Release
 904 // before a FastUnlock or volatile-store.  All volatiles need to be
 905 // serialized, so we follow all volatile-stores with a MemBar-Volatile to
 906 // separate it from any following volatile-load.
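// As an illustrative summary (derived from the comment above, not a further
// guarantee of this file): a volatile store compiles roughly to
//   MemBarRelease; StoreX; MemBarVolatile;
// while a volatile load compiles roughly to
//   LoadX; MemBarAcquire;
// and FastLock/FastUnlock get the corresponding Acquire/Release barriers.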
 907 class MemBarNode: public MultiNode {
 908   virtual uint hash() const ;                  // { return NO_HASH; }
 909   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
 910 
 911   virtual uint size_of() const { return sizeof(*this); }
 912   // Memory type this node is serializing.  Usually either rawptr or bottom.
 913   const TypePtr* _adr_type;
 914 
 915 public:
 916   enum {
 917     Precedent = TypeFunc::Parms  // optional edge to force precedence
 918   };
 919   MemBarNode(Compile* C, int alias_idx, Node* precedent);
 920   virtual int Opcode() const = 0;
 921   virtual const class TypePtr *adr_type() const { return _adr_type; }
 922   virtual const Type *Value( PhaseTransform *phase ) const;
 923   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 924   virtual uint match_edge(uint idx) const { return 0; }
 925   virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
 926   virtual Node *match( const ProjNode *proj, const Matcher *m );
 927   // Factory method.  Builds a wide or narrow membar.
 928   // Optional 'precedent' becomes an extra edge if not null.
 929   static MemBarNode* make(Compile* C, int opcode,
 930                           int alias_idx = Compile::AliasIdxBot,
 931                           Node* precedent = NULL);
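  // Illustrative sketch only (not from this file): requesting a wide release
  // barrier with the default alias index and no precedent edge.  'C' here is
  // assumed to be the current Compile.
  //
  //   MemBarNode* mb = MemBarNode::make(C, Op_MemBarRelease);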
 932 };
 933 
 934 // "Acquire" - no following ref can move before (but earlier refs can
 935 // follow, like an early Load stalled in cache).  Requires multi-cpu
 936 // visibility.  Inserted after a volatile load.
 937 class MemBarAcquireNode: public MemBarNode {
 938 public:
 939   MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
 940     : MemBarNode(C, alias_idx, precedent) {}
 941   virtual int Opcode() const;
 942 };
 943 
 944 // "Release" - no earlier ref can move after (but later refs can move
 945 // up, like a speculative pipelined cache-hitting Load).  Requires
 946 // multi-cpu visibility.  Inserted before a volatile store.
 947 class MemBarReleaseNode: public MemBarNode {
 948 public:
 949   MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
 950     : MemBarNode(C, alias_idx, precedent) {}
 951   virtual int Opcode() const;
 952 };
 953 
 954 // "Acquire" - no following ref can move before (but earlier refs can
 955 // follow, like an early Load stalled in cache).  Requires multi-cpu
 956 // visibility.  Inserted after a FastLock.
 957 class MemBarAcquireLockNode: public MemBarNode {
 958 public:
 959   MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
 960     : MemBarNode(C, alias_idx, precedent) {}
 961   virtual int Opcode() const;
 962 };
 963 
 964 // "Release" - no earlier ref can move after (but later refs can move
 965 // up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
 967 class MemBarReleaseLockNode: public MemBarNode {
 968 public:
 969   MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
 970     : MemBarNode(C, alias_idx, precedent) {}
 971   virtual int Opcode() const;
 972 };
 973 
 974 class MemBarStoreStoreNode: public MemBarNode {
 975 public:
 976   MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
 977     : MemBarNode(C, alias_idx, precedent) {
 978     init_class_id(Class_MemBarStoreStore);
 979   }
 980   virtual int Opcode() const;
 981 };
 982 
 983 // Ordering between a volatile store and a following volatile load.
 984 // Requires multi-CPU visibility?
 985 class MemBarVolatileNode: public MemBarNode {
 986 public:
 987   MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
 988     : MemBarNode(C, alias_idx, precedent) {}
 989   virtual int Opcode() const;
 990 };
 991 
 992 // Ordering within the same CPU.  Used to order unsafe memory references
 993 // inside the compiler when we lack alias info.  Not needed "outside" the
 994 // compiler because the CPU does all the ordering for us.
 995 class MemBarCPUOrderNode: public MemBarNode {
 996 public:
 997   MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
 998     : MemBarNode(C, alias_idx, precedent) {}
 999   virtual int Opcode() const;
1000   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1001 };
1002 
1003 // Isolation of object setup after an AllocateNode and before next safepoint.
1004 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
1005 class InitializeNode: public MemBarNode {
1006   friend class AllocateNode;
1007 
1008   enum {
1009     Incomplete    = 0,
1010     Complete      = 1,
1011     WithArraycopy = 2
1012   };
1013   int _is_complete;
1014 
1015   bool _does_not_escape;
1016 
1017 public:
1018   enum {
1019     Control    = TypeFunc::Control,
1020     Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
1021     RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
1022     RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
1023   };
1024 
1025   InitializeNode(Compile* C, int adr_type, Node* rawoop);
1026   virtual int Opcode() const;
1027   virtual uint size_of() const { return sizeof(*this); }
1028   virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1029   virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress
1030 
1031   // Manage incoming memory edges via a MergeMem on in(Memory):
1032   Node* memory(uint alias_idx);
1033 
1034   // The raw memory edge coming directly from the Allocation.
1035   // The contents of this memory are *always* all-zero-bits.
1036   Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
1037 
1038   // Return the corresponding allocation for this initialization (or null if none).
1039   // (Note: Both InitializeNode::allocation and AllocateNode::initialization
1040   // are defined in graphKit.cpp, which sets up the bidirectional relation.)
1041   AllocateNode* allocation();
1042 
1043   // Anything other than zeroing in this init?
1044   bool is_non_zero();
1045 
  // An InitializeNode must be completed before macro expansion is done.
1047   // Completion requires that the AllocateNode must be followed by
1048   // initialization of the new memory to zero, then to any initializers.
1049   bool is_complete() { return _is_complete != Incomplete; }
1050   bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }
1051 
1052   // Mark complete.  (Must not yet be complete.)
1053   void set_complete(PhaseGVN* phase);
1054   void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }
1055 
1056   bool does_not_escape() { return _does_not_escape; }
1057   void set_does_not_escape() { _does_not_escape = true; }
1058 
1059 #ifdef ASSERT
1060   // ensure all non-degenerate stores are ordered and non-overlapping
1061   bool stores_are_sane(PhaseTransform* phase);
1062 #endif //ASSERT
1063 
1064   // See if this store can be captured; return offset where it initializes.
1065   // Return 0 if the store cannot be moved (any sort of problem).
1066   intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);
1067 
1068   // Capture another store; reformat it to write my internal raw memory.
1069   // Return the captured copy, else NULL if there is some sort of problem.
1070   Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);
1071 
1072   // Find captured store which corresponds to the range [start..start+size).
1073   // Return my own memory projection (meaning the initial zero bits)
1074   // if there is no such store.  Return NULL if there is a problem.
1075   Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
1076 
1077   // Called when the associated AllocateNode is expanded into CFG.
1078   Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
1079                         intptr_t header_size, Node* size_in_bytes,
1080                         PhaseGVN* phase);
1081 
1082  private:
1083   void remove_extra_zeroes();
1084 
1085   // Find out where a captured store should be placed (or already is placed).
1086   int captured_store_insertion_point(intptr_t start, int size_in_bytes,
1087                                      PhaseTransform* phase);
1088 
1089   static intptr_t get_store_offset(Node* st, PhaseTransform* phase);
1090 
1091   Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
1092 
1093   bool detect_init_independence(Node* n, int& count);
1094 
1095   void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
1096                                PhaseGVN* phase);
1097 
1098   intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
1099 };
1100 
1101 //------------------------------MergeMem---------------------------------------
1102 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
1103 class MergeMemNode: public Node {
1104   virtual uint hash() const ;                  // { return NO_HASH; }
1105   virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
1106   friend class MergeMemStream;
1107   MergeMemNode(Node* def);  // clients use MergeMemNode::make
1108 
1109 public:
1110   // If the input is a whole memory state, clone it with all its slices intact.
1111   // Otherwise, make a new memory state with just that base memory input.
1112   // In either case, the result is a newly created MergeMem.
1113   static MergeMemNode* make(Compile* C, Node* base_memory);
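  // Illustrative sketch only (not from this file): start from a base memory
  // state and then override a single alias slice.  'C', 'mem', 'alias_idx'
  // and 'st' are assumptions standing in for caller-supplied values.
  //
  //   MergeMemNode* mm = MergeMemNode::make(C, mem);  // every slice defaults to 'mem'
  //   mm->set_memory_at(alias_idx, st);               // give one slice its own state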
1114 
1115   virtual int Opcode() const;
1116   virtual Node *Identity( PhaseTransform *phase );
1117   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1118   virtual uint ideal_reg() const { return NotAMachineReg; }
1119   virtual uint match_edge(uint idx) const { return 0; }
1120   virtual const RegMask &out_RegMask() const;
1121   virtual const Type *bottom_type() const { return Type::MEMORY; }
1122   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1123   // sparse accessors
1124   // Fetch the previously stored "set_memory_at", or else the base memory.
1125   // (Caller should clone it if it is a phi-nest.)
1126   Node* memory_at(uint alias_idx) const;
1127   // set the memory, regardless of its previous value
1128   void set_memory_at(uint alias_idx, Node* n);
1129   // the "base" is the memory that provides the non-finite support
1130   Node* base_memory() const       { return in(Compile::AliasIdxBot); }
1131   // warning: setting the base can implicitly set any of the other slices too
1132   void set_base_memory(Node* def);
1133   // sentinel value which denotes a copy of the base memory:
1134   Node*   empty_memory() const    { return in(Compile::AliasIdxTop); }
1135   static Node* make_empty_memory(); // where the sentinel comes from
1136   bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
1137   // hook for the iterator, to perform any necessary setup
1138   void iteration_setup(const MergeMemNode* other = NULL);
1139   // push sentinels until I am at least as long as the other (semantic no-op)
1140   void grow_to_match(const MergeMemNode* other);
1141   bool verify_sparse() const PRODUCT_RETURN0;
1142 #ifndef PRODUCT
1143   virtual void dump_spec(outputStream *st) const;
1144 #endif
1145 };
1146 
1147 class MergeMemStream : public StackObj {
1148  private:
1149   MergeMemNode*       _mm;
1150   const MergeMemNode* _mm2;  // optional second guy, contributes non-empty iterations
1151   Node*               _mm_base;  // loop-invariant base memory of _mm
1152   int                 _idx;
1153   int                 _cnt;
1154   Node*               _mem;
1155   Node*               _mem2;
1156   int                 _cnt2;
1157 
1158   void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
1159     // subsume_node will break sparseness at times, whenever a memory slice
1160     // folds down to a copy of the base ("fat") memory.  In such a case,
1161     // the raw edge will update to base, although it should be top.
1162     // This iterator will recognize either top or base_memory as an
1163     // "empty" slice.  See is_empty, is_empty2, and next below.
1164     //
1165     // The sparseness property is repaired in MergeMemNode::Ideal.
1166     // As long as access to a MergeMem goes through this iterator
1167     // or the memory_at accessor, flaws in the sparseness will
1168     // never be observed.
1169     //
1170     // Also, iteration_setup repairs sparseness.
1171     assert(mm->verify_sparse(), "please, no dups of base");
1172     assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");
1173 
1174     _mm  = mm;
1175     _mm_base = mm->base_memory();
1176     _mm2 = mm2;
1177     _cnt = mm->req();
1178     _idx = Compile::AliasIdxBot-1; // start at the base memory
1179     _mem = NULL;
1180     _mem2 = NULL;
1181   }
1182 
1183 #ifdef ASSERT
1184   Node* check_memory() const {
1185     if (at_base_memory())
1186       return _mm->base_memory();
1187     else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
1188       return _mm->memory_at(_idx);
1189     else
1190       return _mm_base;
1191   }
1192   Node* check_memory2() const {
1193     return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
1194   }
1195 #endif
1196 
1197   static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
1198   void assert_synch() const {
1199     assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
1200            "no side-effects except through the stream");
1201   }
1202 
1203  public:
1204 
1205   // expected usages:
  // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
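  // A slightly fuller illustrative version of the first idiom (assumptions:
  // 'mem' is known to be a MergeMem, 'old_mem' is a slice being replaced,
  // and 'n' is its replacement):
  //
  //   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
  //     Node* slice = mms.memory();   // current non-empty slice
  //     if (slice == old_mem)  mms.set_memory(n);
  //   }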
1208 
1209   // iterate over one merge
1210   MergeMemStream(MergeMemNode* mm) {
1211     mm->iteration_setup();
1212     init(mm);
1213     debug_only(_cnt2 = 999);
1214   }
1215   // iterate in parallel over two merges
1216   // only iterates through non-empty elements of mm2
1217   MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
1218     assert(mm2, "second argument must be a MergeMem also");
1219     ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
1220     mm->iteration_setup(mm2);
1221     init(mm, mm2);
1222     _cnt2 = mm2->req();
1223   }
1224 #ifdef ASSERT
1225   ~MergeMemStream() {
1226     assert_synch();
1227   }
1228 #endif
1229 
1230   MergeMemNode* all_memory() const {
1231     return _mm;
1232   }
1233   Node* base_memory() const {
1234     assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
1235     return _mm_base;
1236   }
1237   const MergeMemNode* all_memory2() const {
1238     assert(_mm2 != NULL, "");
1239     return _mm2;
1240   }
1241   bool at_base_memory() const {
1242     return _idx == Compile::AliasIdxBot;
1243   }
1244   int alias_idx() const {
1245     assert(_mem, "must call next 1st");
1246     return _idx;
1247   }
1248 
1249   const TypePtr* adr_type() const {
1250     return Compile::current()->get_adr_type(alias_idx());
1251   }
1252 
1253   const TypePtr* adr_type(Compile* C) const {
1254     return C->get_adr_type(alias_idx());
1255   }
1256   bool is_empty() const {
1257     assert(_mem, "must call next 1st");
1258     assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
1259     return _mem->is_top();
1260   }
1261   bool is_empty2() const {
1262     assert(_mem2, "must call next 1st");
1263     assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
1264     return _mem2->is_top();
1265   }
1266   Node* memory() const {
1267     assert(!is_empty(), "must not be empty");
1268     assert_synch();
1269     return _mem;
1270   }
1271   // get the current memory, regardless of empty or non-empty status
1272   Node* force_memory() const {
1273     assert(!is_empty() || !at_base_memory(), "");
1274     // Use _mm_base to defend against updates to _mem->base_memory().
1275     Node *mem = _mem->is_top() ? _mm_base : _mem;
1276     assert(mem == check_memory(), "");
1277     return mem;
1278   }
1279   Node* memory2() const {
1280     assert(_mem2 == check_memory2(), "");
1281     return _mem2;
1282   }
1283   void set_memory(Node* mem) {
1284     if (at_base_memory()) {
1285       // Note that this does not change the invariant _mm_base.
1286       _mm->set_base_memory(mem);
1287     } else {
1288       _mm->set_memory_at(_idx, mem);
1289     }
1290     _mem = mem;
1291     assert_synch();
1292   }
1293 
1294   // Recover from a side effect to the MergeMemNode.
1295   void set_memory() {
1296     _mem = _mm->in(_idx);
1297   }
1298 
1299   bool next()  { return next(false); }
1300   bool next2() { return next(true); }
1301 
1302   bool next_non_empty()  { return next_non_empty(false); }
1303   bool next_non_empty2() { return next_non_empty(true); }
1304   // next_non_empty2 can yield states where is_empty() is true
1305 
1306  private:
1307   // find the next item, which might be empty
1308   bool next(bool have_mm2) {
1309     assert((_mm2 != NULL) == have_mm2, "use other next");
1310     assert_synch();
1311     if (++_idx < _cnt) {
1312       // Note:  This iterator allows _mm to be non-sparse.
1313       // It behaves the same whether _mem is top or base_memory.
1314       _mem = _mm->in(_idx);
1315       if (have_mm2)
1316         _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
1317       return true;
1318     }
1319     return false;
1320   }
1321 
1322   // find the next non-empty item
1323   bool next_non_empty(bool have_mm2) {
1324     while (next(have_mm2)) {
1325       if (!is_empty()) {
1326         // make sure _mem2 is filled in sensibly
1327         if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
1328         return true;
1329       } else if (have_mm2 && !is_empty2()) {
1330         return true;   // is_empty() == true
1331       }
1332     }
1333     return false;
1334   }
1335 };
1336 
1337 //------------------------------Prefetch---------------------------------------
1338 
1339 // Non-faulting prefetch load.  Prefetch for many reads.
1340 class PrefetchReadNode : public Node {
1341 public:
1342   PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
1343   virtual int Opcode() const;
1344   virtual uint ideal_reg() const { return NotAMachineReg; }
1345   virtual uint match_edge(uint idx) const { return idx==2; }
1346   virtual const Type *bottom_type() const { return Type::ABIO; }
1347 };
1348 
1349 // Non-faulting prefetch load.  Prefetch for many reads & many writes.
1350 class PrefetchWriteNode : public Node {
1351 public:
1352   PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
1353   virtual int Opcode() const;
1354   virtual uint ideal_reg() const { return NotAMachineReg; }
1355   virtual uint match_edge(uint idx) const { return idx==2; }
1356   virtual const Type *bottom_type() const { return Type::ABIO; }
1357 };
1358 
// Allocation prefetch which may fault; TLAB size has to be adjusted.
1360 class PrefetchAllocationNode : public Node {
1361 public:
1362   PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
1363   virtual int Opcode() const;
1364   virtual uint ideal_reg() const { return NotAMachineReg; }
1365   virtual uint match_edge(uint idx) const { return idx==2; }
1366   virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
1367 };
1368 
1369 #endif // SHARE_VM_OPTO_MEMNODE_HPP