< prev index next >

src/share/vm/opto/memnode.cpp

Print this page




1052 
1053 //----------------------is_instance_field_load_with_local_phi------------------
     // True iff this load goes through a memory Phi whose region is 'ctrl' and
     // reads a known-instance field (or boxed value) at a concrete offset.
1054 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1055   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1056       in(Address)->is_AddP() ) {
1057     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1058     // Only instances and boxed values.
1059     if( t_oop != NULL &&
1060         (t_oop->is_ptr_to_boxed_value() ||
1061          t_oop->is_known_instance_field()) &&
     // The field offset must be exact: neither unknown (Bot) nor empty (Top).
1062         t_oop->offset() != Type::OffsetBot &&
1063         t_oop->offset() != Type::OffsetTop) {
1064       return true;
1065     }
1066   }
1067   return false;
1068 }
1069 
1070 //------------------------------Identity---------------------------------------
1071 // Loads are identity if previous store is to same address
1072 Node *LoadNode::Identity( PhaseTransform *phase ) {
1073   // If the previous store-maker is the right kind of Store, and the store is
1074   // to the same address, then we are equal to the value stored.
1075   Node* mem = in(Memory);
1076   Node* value = can_see_stored_value(mem, phase);
1077   if( value ) {
1078     // byte, short & char stores truncate naturally.
1079     // A load has to load the truncated value which requires
1080     // some sort of masking operation and that requires an
1081     // Ideal call instead of an Identity call.
1082     if (memory_size() < BytesPerInt) {
1083       // If the input to the store does not fit with the load's result type,
1084       // it must be truncated via an Ideal call.
1085       if (!phase->type(value)->higher_equal(phase->type(this)))
1086         return this;
1087     }
1088     // (This works even when value is a Con, but LoadNode::Value
1089     // usually runs first, producing the singleton type of the Con.)
1090     return value;
1091   }
1092 


1598         // Join with the array element type, in case it is also stable.
1599         int dim = ary->stable_dimension();
1600         con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
1601       }
1602       if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
1603         con_type = con_type->make_narrowoop();
1604       }
1605 #ifndef PRODUCT
1606       if (TraceIterativeGVN) {
1607         tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
1608         con_type->dump(); tty->cr();
1609       }
1610 #endif //PRODUCT
1611       return con_type;
1612     }
1613   }
1614   return NULL;
1615 }
1616 
1617 //------------------------------Value-----------------------------------------
1618 const Type *LoadNode::Value( PhaseTransform *phase ) const {
1619   // Either input is TOP ==> the result is TOP
1620   Node* mem = in(MemNode::Memory);
1621   const Type *t1 = phase->type(mem);
1622   if (t1 == Type::TOP)  return Type::TOP;
1623   Node* adr = in(MemNode::Address);
1624   const TypePtr* tp = phase->type(adr)->isa_ptr();
1625   if (tp == NULL || tp->empty())  return Type::TOP;
1626   int off = tp->offset();
1627   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
1628   Compile* C = phase->C;
1629 
1630   // Try to guess loaded type from pointer type
1631   if (tp->isa_aryptr()) {
1632     const TypeAryPtr* ary = tp->is_aryptr();
1633     const Type* t = ary->elem();
1634 
1635     // Determine whether the reference is beyond the header or not, by comparing
1636     // the offset against the offset of the start of the array's data.
1637     // Different array types begin at slightly different offsets (12 vs. 16).
1638     // We choose T_BYTE as an example base type that is least restrictive


1884 }
1885 
1886 //--------------------------LoadBNode::Ideal--------------------------------------
1887 //
1888 //  If the previous store is to the same address as this load,
1889 //  and the value stored was larger than a byte, replace this load
1890 //  with the value stored truncated to a byte.  If no truncation is
1891 //  needed, the replacement is done in LoadNode::Identity().
1892 //
1893 Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1894   Node* mem = in(MemNode::Memory);
1895   Node* value = can_see_stored_value(mem,phase);
1896   if( value && !phase->type(value)->higher_equal( _type ) ) {
     // Sign-extend the low byte of the stored value: (value << 24) >> 24.
1897     Node *result = phase->transform( new LShiftINode(value, phase->intcon(24)) );
1898     return new RShiftINode(result, phase->intcon(24));
1899   }
1900   // Identity call will handle the case where truncation is not needed.
1901   return LoadNode::Ideal(phase, can_reshape);
1902 }
1903 
     // Constant-fold a byte load whose visible store value is a constant wider
     // than a byte: produce the sign-extended low 8 bits as a singleton type.
1904 const Type* LoadBNode::Value(PhaseTransform *phase) const {
1905   Node* mem = in(MemNode::Memory);
1906   Node* value = can_see_stored_value(mem,phase);
1907   if (value != NULL && value->is_Con() &&
1908       !value->bottom_type()->higher_equal(_type)) {
1909     // If the input to the store does not fit with the load's result type,
1910     // it must be truncated. We can't delay until Ideal call since
1911     // a singleton Value is needed for split_thru_phi optimization.
1912     int con = value->get_int();
     // Sign-extend the low byte of the stored constant.
1913     return TypeInt::make((con << 24) >> 24);
1914   }
1915   return LoadNode::Value(phase);
1916 }
1917 
1918 //--------------------------LoadUBNode::Ideal-------------------------------------
1919 //
1920 //  If the previous store is to the same address as this load,
1921 //  and the value stored was larger than a byte, replace this load
1922 //  with the value stored truncated to a byte.  If no truncation is
1923 //  needed, the replacement is done in LoadNode::Identity().
1924 //
1925 Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1926   Node* mem = in(MemNode::Memory);
1927   Node* value = can_see_stored_value(mem, phase);
1928   if (value && !phase->type(value)->higher_equal(_type))
     // Zero-extend: unsigned byte load keeps only the low 8 bits.
1929     return new AndINode(value, phase->intcon(0xFF));
1930   // Identity call will handle the case where truncation is not needed.
1931   return LoadNode::Ideal(phase, can_reshape);
1932 }
1933 
     // Constant-fold an unsigned-byte load whose visible store value is a wider
     // constant: mask to the low 8 bits and return the singleton type.
1934 const Type* LoadUBNode::Value(PhaseTransform *phase) const {
1935   Node* mem = in(MemNode::Memory);
1936   Node* value = can_see_stored_value(mem,phase);
1937   if (value != NULL && value->is_Con() &&
1938       !value->bottom_type()->higher_equal(_type)) {
1939     // If the input to the store does not fit with the load's result type,
1940     // it must be truncated. We can't delay until Ideal call since
1941     // a singleton Value is needed for split_thru_phi optimization.
1942     int con = value->get_int();
     // Zero-extend the low byte of the stored constant.
1943     return TypeInt::make(con & 0xFF);
1944   }
1945   return LoadNode::Value(phase);
1946 }
1947 
1948 //--------------------------LoadUSNode::Ideal-------------------------------------
1949 //
1950 //  If the previous store is to the same address as this load,
1951 //  and the value stored was larger than a char, replace this load
1952 //  with the value stored truncated to a char.  If no truncation is
1953 //  needed, the replacement is done in LoadNode::Identity().
1954 //
1955 Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1956   Node* mem = in(MemNode::Memory);
1957   Node* value = can_see_stored_value(mem,phase);
1958   if( value && !phase->type(value)->higher_equal( _type ) )
     // Zero-extend: char load keeps only the low 16 bits.
1959     return new AndINode(value,phase->intcon(0xFFFF));
1960   // Identity call will handle the case where truncation is not needed.
1961   return LoadNode::Ideal(phase, can_reshape);
1962 }
1963 
     // Constant-fold a char load whose visible store value is a wider constant:
     // mask to the low 16 bits and return the singleton type.
1964 const Type* LoadUSNode::Value(PhaseTransform *phase) const {
1965   Node* mem = in(MemNode::Memory);
1966   Node* value = can_see_stored_value(mem,phase);
1967   if (value != NULL && value->is_Con() &&
1968       !value->bottom_type()->higher_equal(_type)) {
1969     // If the input to the store does not fit with the load's result type,
1970     // it must be truncated. We can't delay until Ideal call since
1971     // a singleton Value is needed for split_thru_phi optimization.
1972     int con = value->get_int();
     // Zero-extend the low 16 bits of the stored constant.
1973     return TypeInt::make(con & 0xFFFF);
1974   }
1975   return LoadNode::Value(phase);
1976 }
1977 
1978 //--------------------------LoadSNode::Ideal--------------------------------------
1979 //
1980 //  If the previous store is to the same address as this load,
1981 //  and the value stored was larger than a short, replace this load
1982 //  with the value stored truncated to a short.  If no truncation is
1983 //  needed, the replacement is done in LoadNode::Identity().
1984 //
1985 Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1986   Node* mem = in(MemNode::Memory);
1987   Node* value = can_see_stored_value(mem,phase);
1988   if( value && !phase->type(value)->higher_equal( _type ) ) {
     // Sign-extend the low 16 bits of the stored value: (value << 16) >> 16.
1989     Node *result = phase->transform( new LShiftINode(value, phase->intcon(16)) );
1990     return new RShiftINode(result, phase->intcon(16));
1991   }
1992   // Identity call will handle the case where truncation is not needed.
1993   return LoadNode::Ideal(phase, can_reshape);
1994 }
1995 
     // Constant-fold a short load whose visible store value is a wider constant:
     // produce the sign-extended low 16 bits as a singleton type.
1996 const Type* LoadSNode::Value(PhaseTransform *phase) const {
1997   Node* mem = in(MemNode::Memory);
1998   Node* value = can_see_stored_value(mem,phase);
1999   if (value != NULL && value->is_Con() &&
2000       !value->bottom_type()->higher_equal(_type)) {
2001     // If the input to the store does not fit with the load's result type,
2002     // it must be truncated. We can't delay until Ideal call since
2003     // a singleton Value is needed for split_thru_phi optimization.
2004     int con = value->get_int();
     // Sign-extend the low 16 bits of the stored constant.
2005     return TypeInt::make((con << 16) >> 16);
2006   }
2007   return LoadNode::Value(phase);
2008 }
2009 
2010 //=============================================================================
2011 //----------------------------LoadKlassNode::make------------------------------
2012 // Polymorphic factory method:
     // Builds the appropriate klass-load for the platform: with compressed class
     // pointers the field holds a narrow klass, so emit LoadNKlass + DecodeNKlass;
     // otherwise emit a plain LoadKlassNode.
2013 Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
2014   // sanity check the alias category against the created node type
2015   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2016   assert(adr_type != NULL, "expecting TypeKlassPtr");
2017 #ifdef _LP64
2018   if (adr_type->is_ptr_to_narrowklass()) {
2019     assert(UseCompressedClassPointers, "no compressed klasses");
     // Load the compressed klass and immediately decode it to a full pointer.
2020     Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2021     return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2022   }
2023 #endif
2024   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2025   return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2026 }
2027 
2028 //------------------------------Value------------------------------------------
     // Shared implementation lives in LoadNode::klass_value_common().
2029 const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
2030   return klass_value_common(phase);
2031 }
2032 
2033 // In most cases, LoadKlassNode does not have the control input set. If the control
2034 // input is set, it must not be removed (by LoadNode::Ideal()).
     // NOTE(review): presumably the control edge pins the klass load below a
     // guard that established its safety — confirm against LoadNode::Ideal().
2035 bool LoadKlassNode::can_remove_control() const {
2036   return false;
2037 }
2038 
2039 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
2040   // Either input is TOP ==> the result is TOP
2041   const Type *t1 = phase->type( in(MemNode::Memory) );
2042   if (t1 == Type::TOP)  return Type::TOP;
2043   Node *adr = in(MemNode::Address);
2044   const Type *t2 = phase->type( adr );
2045   if (t2 == Type::TOP)  return Type::TOP;
2046   const TypePtr *tp = t2->is_ptr();
2047   if (TypePtr::above_centerline(tp->ptr()) ||
2048       tp->ptr() == TypePtr::Null)  return Type::TOP;
2049 
2050   // Return a more precise klass, if possible
2051   const TypeInstPtr *tinst = tp->isa_instptr();
2052   if (tinst != NULL) {
2053     ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
2054     int offset = tinst->offset();
2055     if (ik == phase->C->env()->Class_klass()
2056         && (offset == java_lang_Class::klass_offset_in_bytes() ||
2057             offset == java_lang_Class::array_klass_offset_in_bytes())) {
2058       // We are loading a special hidden field from a Class mirror object,
2059       // the field which points to the VM's Klass metaobject.


2155       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2156       // according to the element type's subclassing.
2157       return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
2158     }
2159     if( klass->is_instance_klass() && tkls->klass_is_exact() &&
2160         tkls->offset() == in_bytes(Klass::super_offset())) {
2161       ciKlass* sup = klass->as_instance_klass()->super();
2162       // The field is Klass::_super.  Return its (constant) value.
2163       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2164       return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
2165     }
2166   }
2167 
2168   // Bailout case
2169   return LoadNode::Value(phase);
2170 }
2171 
2172 //------------------------------Identity---------------------------------------
2173 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
2174 // Also feed through the klass in Allocate(...klass...)._klass.
     // Shared implementation lives in LoadNode::klass_identity_common().
2175 Node* LoadKlassNode::Identity( PhaseTransform *phase ) {
2176   return klass_identity_common(phase);
2177 }
2178 
2179 Node* LoadNode::klass_identity_common(PhaseTransform *phase ) {
2180   Node* x = LoadNode::Identity(phase);
2181   if (x != this)  return x;
2182 
2183   // Take apart the address into an oop and and offset.
2184   // Return 'this' if we cannot.
2185   Node*    adr    = in(MemNode::Address);
2186   intptr_t offset = 0;
2187   Node*    base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2188   if (base == NULL)     return this;
2189   const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
2190   if (toop == NULL)     return this;
2191 
2192   // We can fetch the klass directly through an AllocateNode.
2193   // This works even if the klass is not constant (clone or newArray).
2194   if (offset == oopDesc::klass_offset_in_bytes()) {
2195     Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
2196     if (allocated_klass != NULL) {
2197       return allocated_klass;
2198     }
2199   }


2214       Node* adr2 = base->in(MemNode::Address);
2215       const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2216       if (tkls != NULL && !tkls->empty()
2217           && (tkls->klass()->is_instance_klass() ||
2218               tkls->klass()->is_array_klass())
2219           && adr2->is_AddP()
2220           ) {
2221         int mirror_field = in_bytes(Klass::java_mirror_offset());
2222         if (tkls->offset() == mirror_field) {
2223           return adr2->in(AddPNode::Base);
2224         }
2225       }
2226     }
2227   }
2228 
2229   return this;
2230 }
2231 
2232 
2233 //------------------------------Value------------------------------------------
     // Same computation as LoadKlassNode::Value, but the result is reported as a
     // narrow (compressed) klass type.
2234 const Type *LoadNKlassNode::Value( PhaseTransform *phase ) const {
2235   const Type *t = klass_value_common(phase);
2236   if (t == Type::TOP)
2237     return t;
2238 
2239   return t->make_narrowklass();
2240 }
2241 
2242 //------------------------------Identity---------------------------------------
2243 // To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
2244 // Also feed through the klass in Allocate(...klass...)._klass.
2245 Node* LoadNKlassNode::Identity( PhaseTransform *phase ) {
2246   Node *x = klass_identity_common(phase);
2247 
2248   const Type *t = phase->type( x );
2249   if( t == Type::TOP ) return x;
     // Result is already narrow (e.g. this node itself): nothing to wrap.
2250   if( t->isa_narrowklass()) return x;
2251   assert (!t->isa_narrowoop(), "no narrow oop here");
2252 
     // The common code produced a wide klass pointer; re-compress it.
2253   return phase->transform(new EncodePKlassNode(x, t->make_narrowklass()));
2254 }
2255 
2256 //------------------------------Value-----------------------------------------
     // The value of a LoadRange is the array length as tracked by the address's
     // array-pointer type; fall back to the declared _type if it is not an array.
2257 const Type *LoadRangeNode::Value( PhaseTransform *phase ) const {
2258   // Either input is TOP ==> the result is TOP
2259   const Type *t1 = phase->type( in(MemNode::Memory) );
2260   if( t1 == Type::TOP ) return Type::TOP;
2261   Node *adr = in(MemNode::Address);
2262   const Type *t2 = phase->type( adr );
2263   if( t2 == Type::TOP ) return Type::TOP;
2264   const TypePtr *tp = t2->is_ptr();
2265   if (TypePtr::above_centerline(tp->ptr()))  return Type::TOP;
2266   const TypeAryPtr *tap = tp->isa_aryptr();
2267   if( !tap ) return _type;
     // TypeAryPtr::size() is the (possibly constant) length of the array.
2268   return tap->size();
2269 }
2270 
2271 //-------------------------------Ideal---------------------------------------
2272 // Feed through the length in AllocateArray(...length...)._length.
2273 Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2274   Node* p = MemNode::Ideal_common(phase, can_reshape);
2275   if (p)  return (p == NodeSentinel) ? NULL : p;
2276 
2277   // Take apart the address into an oop and and offset.


2285 
2286   // We can fetch the length directly through an AllocateArrayNode.
2287   // This works even if the length is not constant (clone or newArray).
2288   if (offset == arrayOopDesc::length_offset_in_bytes()) {
2289     AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
2290     if (alloc != NULL) {
2291       Node* allocated_length = alloc->Ideal_length();
2292       Node* len = alloc->make_ideal_length(tary, phase);
2293       if (allocated_length != len) {
2294         // New CastII improves on this.
2295         return len;
2296       }
2297     }
2298   }
2299 
2300   return NULL;
2301 }
2302 
2303 //------------------------------Identity---------------------------------------
2304 // Feed through the length in AllocateArray(...length...)._length.
2305 Node* LoadRangeNode::Identity( PhaseTransform *phase ) {
2306   Node* x = LoadINode::Identity(phase);
2307   if (x != this)  return x;
2308 
2309   // Take apart the address into an oop and and offset.
2310   // Return 'this' if we cannot.
2311   Node*    adr    = in(MemNode::Address);
2312   intptr_t offset = 0;
2313   Node*    base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2314   if (base == NULL)     return this;
2315   const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
2316   if (tary == NULL)     return this;
2317 
2318   // We can fetch the length directly through an AllocateArrayNode.
2319   // This works even if the length is not constant (clone or newArray).
2320   if (offset == arrayOopDesc::length_offset_in_bytes()) {
2321     AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
2322     if (alloc != NULL) {
2323       Node* allocated_length = alloc->Ideal_length();
2324       // Do not allow make_ideal_length to allocate a CastII node.
2325       Node* len = alloc->make_ideal_length(tary, phase, false);


2456   if (ReduceFieldZeroing && /*can_reshape &&*/
2457       mem->is_Proj() && mem->in(0)->is_Initialize()) {
2458     InitializeNode* init = mem->in(0)->as_Initialize();
2459     intptr_t offset = init->can_capture_store(this, phase, can_reshape);
2460     if (offset > 0) {
2461       Node* moved = init->capture_store(this, offset, phase, can_reshape);
2462       // If the InitializeNode captured me, it made a raw copy of me,
2463       // and I need to disappear.
2464       if (moved != NULL) {
2465         // %%% hack to ensure that Ideal returns a new node:
2466         mem = MergeMemNode::make(mem);
2467         return mem;             // fold me away
2468       }
2469     }
2470   }
2471 
2472   return NULL;                  // No further progress
2473 }
2474 
2475 //------------------------------Value-----------------------------------------
     // A store produces memory: TOP if any of memory, address, or the stored
     // value is TOP, otherwise the MEMORY type.
2476 const Type *StoreNode::Value( PhaseTransform *phase ) const {
2477   // Either input is TOP ==> the result is TOP
2478   const Type *t1 = phase->type( in(MemNode::Memory) );
2479   if( t1 == Type::TOP ) return Type::TOP;
2480   const Type *t2 = phase->type( in(MemNode::Address) );
2481   if( t2 == Type::TOP ) return Type::TOP;
2482   const Type *t3 = phase->type( in(MemNode::ValueIn) );
2483   if( t3 == Type::TOP ) return Type::TOP;
2484   return Type::MEMORY;
2485 }
2486 
2487 //------------------------------Identity---------------------------------------
2488 // Remove redundant stores:
2489 //   Store(m, p, Load(m, p)) changes to m.
2490 //   Store(, p, x) -> Store(m, p, x) changes to Store(m, p, x).
2491 Node *StoreNode::Identity( PhaseTransform *phase ) {
2492   Node* mem = in(MemNode::Memory);
2493   Node* adr = in(MemNode::Address);
2494   Node* val = in(MemNode::ValueIn);
2495 
2496   // Load then Store?  Then the Store is useless
2497   if (val->is_Load() &&
2498       val->in(MemNode::Address)->eqv_uncast(adr) &&
2499       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2500       val->as_Load()->store_Opcode() == Opcode()) {
2501     return mem;
2502   }
2503 
2504   // Two stores in a row of the same value?
2505   if (mem->is_Store() &&
2506       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2507       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2508       mem->Opcode() == Opcode()) {
2509     return mem;
2510   }
2511 


2625   return StoreNode::Ideal(phase, can_reshape);
2626 }
2627 
2628 //=============================================================================
2629 //------------------------------Ideal------------------------------------------
2630 // If the store is from an AND mask that leaves the low bits untouched, then
2631 // we can skip the AND operation
2632 Node *StoreCNode::Ideal(PhaseGVN *phase, bool can_reshape){
     // A char store keeps only the low 16 bits, so an AND with 0xFFFF is redundant.
2633   Node *progress = StoreNode::Ideal_masked_input(phase, 0xFFFF);
2634   if( progress != NULL ) return progress;
2635 
     // Likewise a 16-bit sign extension (<<16 then >>16) feeding the store is redundant.
2636   progress = StoreNode::Ideal_sign_extended_input(phase, 16);
2637   if( progress != NULL ) return progress;
2638 
2639   // Finally check the default case
2640   return StoreNode::Ideal(phase, can_reshape);
2641 }
2642 
2643 //=============================================================================
2644 //------------------------------Identity---------------------------------------
     // A card mark is only needed for a non-null oop store: if the accompanying
     // oop store writes NULL, this StoreCM can be elided.
2645 Node *StoreCMNode::Identity( PhaseTransform *phase ) {
2646   // No need to card mark when storing a null ptr
2647   Node* my_store = in(MemNode::OopStore);
2648   if (my_store->is_Store()) {
2649     const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
2650     if( t1 == TypePtr::NULL_PTR ) {
     // Identity is the incoming memory state: the node disappears.
2651       return in(MemNode::Memory);
2652     }
2653   }
2654   return this;
2655 }
2656 
2657 //=============================================================================
2658 //------------------------------Ideal---------------------------------------
2659 Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
2660   Node* progress = StoreNode::Ideal(phase, can_reshape);
2661   if (progress != NULL) return progress;
2662 
     // Narrow the OopStore input: if it is a MergeMem, depend only on the slice
     // at this node's oop alias index rather than on all of memory.
2663   Node* my_store = in(MemNode::OopStore);
2664   if (my_store->is_MergeMem()) {
2665     Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
2666     set_req(MemNode::OopStore, mem);
     // Edge was changed in place; returning 'this' reports progress.
2667     return this;
2668   }
2669 
2670   return NULL;
2671 }
2672 
2673 //------------------------------Value-----------------------------------------
     // Like StoreNode::Value, but also checks the extra OopStore input for TOP.
2674 const Type *StoreCMNode::Value( PhaseTransform *phase ) const {
2675   // Either input is TOP ==> the result is TOP
2676   const Type *t = phase->type( in(MemNode::Memory) );
2677   if( t == Type::TOP ) return Type::TOP;
2678   t = phase->type( in(MemNode::Address) );
2679   if( t == Type::TOP ) return Type::TOP;
2680   t = phase->type( in(MemNode::ValueIn) );
2681   if( t == Type::TOP ) return Type::TOP;
2682   // If extra input is TOP ==> the result is TOP
2683   t = phase->type( in(MemNode::OopStore) );
2684   if( t == Type::TOP ) return Type::TOP;
2685 
2686   return StoreNode::Value( phase );
2687 }
2688 
2689 
2690 //=============================================================================
2691 //----------------------------------SCMemProjNode------------------------------
     // Memory projection of a load-store node: its type is fixed, so simply
     // report the node's bottom type.
2692 const Type * SCMemProjNode::Value( PhaseTransform *phase ) const
2693 {
2694   return bottom_type();
2695 }
2696 
2697 //=============================================================================
2698 //----------------------------------LoadStoreNode------------------------------
     // Base constructor: wires the standard memory-op inputs (control, memory,
     // address, value) and records the result type 'rt' and alias category 'at'.
     // 'required' is the input count forwarded to the Node constructor, allowing
     // subclasses to reserve extra edges.
2699 LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
2700   : Node(required),
2701     _type(rt),
2702     _adr_type(at)
2703 {
2704   init_req(MemNode::Control, c  );
2705   init_req(MemNode::Memory , mem);
2706   init_req(MemNode::Address, adr);
2707   init_req(MemNode::ValueIn, val);
2708   init_class_id(Class_LoadStore);
2709 }
2710 
2711 uint LoadStoreNode::ideal_reg() const {
2712   return _type->ideal_reg();


     // Conditional load-store (e.g. compare-and-exchange style): adds the
     // expected value as a fifth input; the result type is BOOL (success flag).
2728 LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
2729   init_req(ExpectedIn, ex );
2730 }
2731 
2732 //=============================================================================
2733 //-------------------------------adr_type--------------------------------------
     // Alias category is derived from the destination address input, in(3).
2734 const TypePtr* ClearArrayNode::adr_type() const {
2735   Node *adr = in(3);
2736   if (adr == NULL)  return NULL; // node is dead
2737   return MemNode::calculate_adr_type(adr->bottom_type());
2738 }
2739 
2740 //------------------------------match_edge-------------------------------------
2741 // Do we Match on this edge index or not?  Do not match memory
     // Edges 0 (control) and 1 (memory) are not matched; all later inputs are.
2742 uint ClearArrayNode::match_edge(uint idx) const {
2743   return idx > 1;
2744 }
2745 
2746 //------------------------------Identity---------------------------------------
2747 // Clearing a zero length array does nothing
     // If the count in(2) is provably zero, identity is the incoming memory in(1).
2748 Node *ClearArrayNode::Identity( PhaseTransform *phase ) {
2749   return phase->type(in(2))->higher_equal(TypeX::ZERO)  ? in(1) : this;
2750 }
2751 
2752 //------------------------------Idealize---------------------------------------
2753 // Clearing a short array is faster with stores
2754 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
2755   const int unit = BytesPerLong;
2756   const TypeX* t = phase->type(in(2))->isa_intptr_t();
2757   if (!t)  return NULL;
2758   if (!t->is_con())  return NULL;
2759   intptr_t raw_count = t->get_con();
2760   intptr_t size = raw_count;
2761   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
2762   // Clearing nothing uses the Identity call.
2763   // Negative clears are possible on dead ClearArrays
2764   // (see jck test stmt114.stmt11402.val).
2765   if (size <= 0 || size % unit != 0)  return NULL;
2766   intptr_t count = size / unit;
2767   // Length too long; use fast hardware clear
2768   if (size > Matcher::init_array_short_size)  return NULL;


2984       if ((alloc != NULL) && alloc->is_Allocate() &&
2985           alloc->as_Allocate()->does_not_escape_thread()) {
2986         // The allocated object does not escape.
2987         eliminate = true;
2988       }
2989     }
2990     if (eliminate) {
2991       // Replace MemBar projections by its inputs.
2992       PhaseIterGVN* igvn = phase->is_IterGVN();
2993       igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
2994       igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
2995       // Must return either the original node (now dead) or a new node
2996       // (Do not return a top here, since that would break the uniqueness of top.)
2997       return new ConINode(TypeInt::ZERO);
2998     }
2999   }
3000   return progress ? this : NULL;
3001 }
3002 
3003 //------------------------------Value------------------------------------------
     // A membar with missing or dead control is TOP; otherwise it produces the
     // standard MEMBAR tuple type.
3004 const Type *MemBarNode::Value( PhaseTransform *phase ) const {
3005   if( !in(0) ) return Type::TOP;
3006   if( phase->type(in(0)) == Type::TOP )
3007     return Type::TOP;
3008   return TypeTuple::MEMBAR;
3009 }
3010 
3011 //------------------------------match------------------------------------------
3012 // Construct projections for memory.
     // Control and Memory projections become unmatched MachProj nodes with an
     // empty register mask; any other projection index is a programming error.
3013 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
3014   switch (proj->_con) {
3015   case TypeFunc::Control:
3016   case TypeFunc::Memory:
3017     return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3018   }
3019   ShouldNotReachHere();
3020   return NULL;
3021 }
3022 
3023 //===========================InitializeNode====================================
3024 // SUMMARY:


4126     }
4127     assert(base_memory() == mdef->base_memory(), "");
4128   } else {
4129     set_base_memory(new_base);
4130   }
4131 }
4132 
4133 // Make a new, untransformed MergeMem with the same base as 'mem'.
4134 // If mem is itself a MergeMem, populate the result with the same edges.
     // Factory only constructs; the caller transforms the node when ready.
4135 MergeMemNode* MergeMemNode::make(Node* mem) {
4136   return new MergeMemNode(mem);
4137 }
4138 
4139 //------------------------------cmp--------------------------------------------
     // NO_HASH excludes MergeMem from value-numbering, and cmp only matches the
     // node itself, so no two distinct MergeMems are ever commoned.
4140 uint MergeMemNode::hash() const { return NO_HASH; }
4141 uint MergeMemNode::cmp( const Node &n ) const {
4142   return (&n == this);          // Always fail except on self
4143 }
4144 
4145 //------------------------------Identity---------------------------------------
4146 Node* MergeMemNode::Identity(PhaseTransform *phase) {
4147   // Identity if this merge point does not record any interesting memory
4148   // disambiguations.
4149   Node* base_mem = base_memory();
4150   Node* empty_mem = empty_memory();
4151   if (base_mem != empty_mem) {  // Memory path is not dead?
     // Scan narrow slices (from AliasIdxRaw up): any slice differing from both
     // the empty marker and the base memory is a real memory split.
4152     for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
4153       Node* mem = in(i);
4154       if (mem != empty_mem && mem != base_mem) {
4155         return this;            // Many memory splits; no change
4156       }
4157     }
4158   }
4159   return base_mem;              // No memory splits; ID on the one true input
4160 }
4161 
4162 //------------------------------Ideal------------------------------------------
4163 // This method is invoked recursively on chains of MergeMem nodes
4164 Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4165   // Remove chain'd MergeMems
4166   //




1052 
1053 //----------------------is_instance_field_load_with_local_phi------------------
     // True iff this load goes through a memory Phi whose region is 'ctrl' and
     // reads a known-instance field (or boxed value) at a concrete offset.
1054 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
1055   if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl &&
1056       in(Address)->is_AddP() ) {
1057     const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr();
1058     // Only instances and boxed values.
1059     if( t_oop != NULL &&
1060         (t_oop->is_ptr_to_boxed_value() ||
1061          t_oop->is_known_instance_field()) &&
     // The field offset must be exact: neither unknown (Bot) nor empty (Top).
1062         t_oop->offset() != Type::OffsetBot &&
1063         t_oop->offset() != Type::OffsetTop) {
1064       return true;
1065     }
1066   }
1067   return false;
1068 }
1069 
1070 //------------------------------Identity---------------------------------------
1071 // Loads are identity if previous store is to same address
1072 Node* LoadNode::Identity(PhaseGVN* phase) {
1073   // If the previous store-maker is the right kind of Store, and the store is
1074   // to the same address, then we are equal to the value stored.
1075   Node* mem = in(Memory);
1076   Node* value = can_see_stored_value(mem, phase);
1077   if( value ) {
1078     // byte, short & char stores truncate naturally.
1079     // A load has to load the truncated value which requires
1080     // some sort of masking operation and that requires an
1081     // Ideal call instead of an Identity call.
1082     if (memory_size() < BytesPerInt) {
1083       // If the input to the store does not fit with the load's result type,
1084       // it must be truncated via an Ideal call.
1085       if (!phase->type(value)->higher_equal(phase->type(this)))
1086         return this;
1087     }
1088     // (This works even when value is a Con, but LoadNode::Value
1089     // usually runs first, producing the singleton type of the Con.)
1090     return value;
1091   }
1092 


1598         // Join with the array element type, in case it is also stable.
1599         int dim = ary->stable_dimension();
1600         con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
1601       }
1602       if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
1603         con_type = con_type->make_narrowoop();
1604       }
1605 #ifndef PRODUCT
1606       if (TraceIterativeGVN) {
1607         tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
1608         con_type->dump(); tty->cr();
1609       }
1610 #endif //PRODUCT
1611       return con_type;
1612     }
1613   }
1614   return NULL;
1615 }
1616 
1617 //------------------------------Value-----------------------------------------
1618 const Type* LoadNode::Value(PhaseGVN* phase) const {
1619   // Either input is TOP ==> the result is TOP
1620   Node* mem = in(MemNode::Memory);
1621   const Type *t1 = phase->type(mem);
1622   if (t1 == Type::TOP)  return Type::TOP;
1623   Node* adr = in(MemNode::Address);
1624   const TypePtr* tp = phase->type(adr)->isa_ptr();
1625   if (tp == NULL || tp->empty())  return Type::TOP;
1626   int off = tp->offset();
1627   assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
1628   Compile* C = phase->C;
1629 
1630   // Try to guess loaded type from pointer type
1631   if (tp->isa_aryptr()) {
1632     const TypeAryPtr* ary = tp->is_aryptr();
1633     const Type* t = ary->elem();
1634 
1635     // Determine whether the reference is beyond the header or not, by comparing
1636     // the offset against the offset of the start of the array's data.
1637     // Different array types begin at slightly different offsets (12 vs. 16).
1638     // We choose T_BYTE as an example base type that is least restrictive


1884 }
1885 
1886 //--------------------------LoadBNode::Ideal--------------------------------------
1887 //
1888 //  If the previous store is to the same address as this load,
1889 //  and the value stored was larger than a byte, replace this load
1890 //  with the value stored truncated to a byte.  If no truncation is
1891 //  needed, the replacement is done in LoadNode::Identity().
1892 //
1893 Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1894   Node* mem = in(MemNode::Memory);
1895   Node* value = can_see_stored_value(mem,phase);
1896   if( value && !phase->type(value)->higher_equal( _type ) ) {
1897     Node *result = phase->transform( new LShiftINode(value, phase->intcon(24)) );
1898     return new RShiftINode(result, phase->intcon(24));
1899   }
1900   // Identity call will handle the case where truncation is not needed.
1901   return LoadNode::Ideal(phase, can_reshape);
1902 }
1903 
1904 const Type* LoadBNode::Value(PhaseGVN* phase) const {
1905   Node* mem = in(MemNode::Memory);
1906   Node* value = can_see_stored_value(mem,phase);
1907   if (value != NULL && value->is_Con() &&
1908       !value->bottom_type()->higher_equal(_type)) {
1909     // If the input to the store does not fit with the load's result type,
1910     // it must be truncated. We can't delay until Ideal call since
1911     // a singleton Value is needed for split_thru_phi optimization.
1912     int con = value->get_int();
1913     return TypeInt::make((con << 24) >> 24);
1914   }
1915   return LoadNode::Value(phase);
1916 }
1917 
1918 //--------------------------LoadUBNode::Ideal-------------------------------------
1919 //
1920 //  If the previous store is to the same address as this load,
1921 //  and the value stored was larger than a byte, replace this load
1922 //  with the value stored truncated to a byte.  If no truncation is
1923 //  needed, the replacement is done in LoadNode::Identity().
1924 //
1925 Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1926   Node* mem = in(MemNode::Memory);
1927   Node* value = can_see_stored_value(mem, phase);
1928   if (value && !phase->type(value)->higher_equal(_type))
1929     return new AndINode(value, phase->intcon(0xFF));
1930   // Identity call will handle the case where truncation is not needed.
1931   return LoadNode::Ideal(phase, can_reshape);
1932 }
1933 
1934 const Type* LoadUBNode::Value(PhaseGVN* phase) const {
1935   Node* mem = in(MemNode::Memory);
1936   Node* value = can_see_stored_value(mem,phase);
1937   if (value != NULL && value->is_Con() &&
1938       !value->bottom_type()->higher_equal(_type)) {
1939     // If the input to the store does not fit with the load's result type,
1940     // it must be truncated. We can't delay until Ideal call since
1941     // a singleton Value is needed for split_thru_phi optimization.
1942     int con = value->get_int();
1943     return TypeInt::make(con & 0xFF);
1944   }
1945   return LoadNode::Value(phase);
1946 }
1947 
1948 //--------------------------LoadUSNode::Ideal-------------------------------------
1949 //
1950 //  If the previous store is to the same address as this load,
1951 //  and the value stored was larger than a char, replace this load
1952 //  with the value stored truncated to a char.  If no truncation is
1953 //  needed, the replacement is done in LoadNode::Identity().
1954 //
1955 Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1956   Node* mem = in(MemNode::Memory);
1957   Node* value = can_see_stored_value(mem,phase);
1958   if( value && !phase->type(value)->higher_equal( _type ) )
1959     return new AndINode(value,phase->intcon(0xFFFF));
1960   // Identity call will handle the case where truncation is not needed.
1961   return LoadNode::Ideal(phase, can_reshape);
1962 }
1963 
1964 const Type* LoadUSNode::Value(PhaseGVN* phase) const {
1965   Node* mem = in(MemNode::Memory);
1966   Node* value = can_see_stored_value(mem,phase);
1967   if (value != NULL && value->is_Con() &&
1968       !value->bottom_type()->higher_equal(_type)) {
1969     // If the input to the store does not fit with the load's result type,
1970     // it must be truncated. We can't delay until Ideal call since
1971     // a singleton Value is needed for split_thru_phi optimization.
1972     int con = value->get_int();
1973     return TypeInt::make(con & 0xFFFF);
1974   }
1975   return LoadNode::Value(phase);
1976 }
1977 
1978 //--------------------------LoadSNode::Ideal--------------------------------------
1979 //
1980 //  If the previous store is to the same address as this load,
1981 //  and the value stored was larger than a short, replace this load
1982 //  with the value stored truncated to a short.  If no truncation is
1983 //  needed, the replacement is done in LoadNode::Identity().
1984 //
1985 Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1986   Node* mem = in(MemNode::Memory);
1987   Node* value = can_see_stored_value(mem,phase);
1988   if( value && !phase->type(value)->higher_equal( _type ) ) {
1989     Node *result = phase->transform( new LShiftINode(value, phase->intcon(16)) );
1990     return new RShiftINode(result, phase->intcon(16));
1991   }
1992   // Identity call will handle the case where truncation is not needed.
1993   return LoadNode::Ideal(phase, can_reshape);
1994 }
1995 
1996 const Type* LoadSNode::Value(PhaseGVN* phase) const {
1997   Node* mem = in(MemNode::Memory);
1998   Node* value = can_see_stored_value(mem,phase);
1999   if (value != NULL && value->is_Con() &&
2000       !value->bottom_type()->higher_equal(_type)) {
2001     // If the input to the store does not fit with the load's result type,
2002     // it must be truncated. We can't delay until Ideal call since
2003     // a singleton Value is needed for split_thru_phi optimization.
2004     int con = value->get_int();
2005     return TypeInt::make((con << 16) >> 16);
2006   }
2007   return LoadNode::Value(phase);
2008 }
2009 
//=============================================================================
//----------------------------LoadKlassNode::make------------------------------
// Polymorphic factory method: builds either a LoadKlassNode or, when the
// address points at a narrow klass (compressed class pointers, 64-bit only),
// a LoadNKlassNode followed by a DecodeNKlassNode so callers always receive
// a node producing a full-width klass pointer.
Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
  // sanity check the alias category against the created node type
  const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
  assert(adr_type != NULL, "expecting TypeKlassPtr");
#ifdef _LP64
  if (adr_type->is_ptr_to_narrowklass()) {
    assert(UseCompressedClassPointers, "no compressed klasses");
    // Load the narrow klass, then decode it back to a regular pointer.
    Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
    return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
  }
#endif
  assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
  return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
}
2027 
//------------------------------Value------------------------------------------
const Type* LoadKlassNode::Value(PhaseGVN* phase) const {
  // Delegate to the implementation shared with LoadNKlassNode.
  return klass_value_common(phase);
}
2032 
// In most cases, LoadKlassNode does not have the control input set. If the control
// input is set, it must not be removed (by LoadNode::Ideal()).
bool LoadKlassNode::can_remove_control() const {
  return false;  // a non-NULL control edge on a klass load must be preserved
}
2038 
2039 const Type* LoadNode::klass_value_common(PhaseGVN* phase) const {
2040   // Either input is TOP ==> the result is TOP
2041   const Type *t1 = phase->type( in(MemNode::Memory) );
2042   if (t1 == Type::TOP)  return Type::TOP;
2043   Node *adr = in(MemNode::Address);
2044   const Type *t2 = phase->type( adr );
2045   if (t2 == Type::TOP)  return Type::TOP;
2046   const TypePtr *tp = t2->is_ptr();
2047   if (TypePtr::above_centerline(tp->ptr()) ||
2048       tp->ptr() == TypePtr::Null)  return Type::TOP;
2049 
2050   // Return a more precise klass, if possible
2051   const TypeInstPtr *tinst = tp->isa_instptr();
2052   if (tinst != NULL) {
2053     ciInstanceKlass* ik = tinst->klass()->as_instance_klass();
2054     int offset = tinst->offset();
2055     if (ik == phase->C->env()->Class_klass()
2056         && (offset == java_lang_Class::klass_offset_in_bytes() ||
2057             offset == java_lang_Class::array_klass_offset_in_bytes())) {
2058       // We are loading a special hidden field from a Class mirror object,
2059       // the field which points to the VM's Klass metaobject.


2155       // The array's TypeKlassPtr was declared 'precise' or 'not precise'
2156       // according to the element type's subclassing.
2157       return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/);
2158     }
2159     if( klass->is_instance_klass() && tkls->klass_is_exact() &&
2160         tkls->offset() == in_bytes(Klass::super_offset())) {
2161       ciKlass* sup = klass->as_instance_klass()->super();
2162       // The field is Klass::_super.  Return its (constant) value.
2163       // (Folds up the 2nd indirection in aClassConstant.getSuperClass().)
2164       return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR;
2165     }
2166   }
2167 
2168   // Bailout case
2169   return LoadNode::Value(phase);
2170 }
2171 
//------------------------------Identity---------------------------------------
// To clean up reflective code, simplify k.java_mirror.as_klass to plain k.
// Also feed through the klass in Allocate(...klass...)._klass.
Node* LoadKlassNode::Identity(PhaseGVN* phase) {
  // Delegate to the implementation shared with LoadNKlassNode.
  return klass_identity_common(phase);
}
2178 
2179 Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
2180   Node* x = LoadNode::Identity(phase);
2181   if (x != this)  return x;
2182 
2183   // Take apart the address into an oop and an offset.
2184   // Return 'this' if we cannot.
2185   Node*    adr    = in(MemNode::Address);
2186   intptr_t offset = 0;
2187   Node*    base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2188   if (base == NULL)     return this;
2189   const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
2190   if (toop == NULL)     return this;
2191 
2192   // We can fetch the klass directly through an AllocateNode.
2193   // This works even if the klass is not constant (clone or newArray).
2194   if (offset == oopDesc::klass_offset_in_bytes()) {
2195     Node* allocated_klass = AllocateNode::Ideal_klass(base, phase);
2196     if (allocated_klass != NULL) {
2197       return allocated_klass;
2198     }
2199   }


2214       Node* adr2 = base->in(MemNode::Address);
2215       const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
2216       if (tkls != NULL && !tkls->empty()
2217           && (tkls->klass()->is_instance_klass() ||
2218               tkls->klass()->is_array_klass())
2219           && adr2->is_AddP()
2220           ) {
2221         int mirror_field = in_bytes(Klass::java_mirror_offset());
2222         if (tkls->offset() == mirror_field) {
2223           return adr2->in(AddPNode::Base);
2224         }
2225       }
2226     }
2227   }
2228 
2229   return this;
2230 }
2231 
2232 
2233 //------------------------------Value------------------------------------------
2234 const Type* LoadNKlassNode::Value(PhaseGVN* phase) const {
2235   const Type *t = klass_value_common(phase);
2236   if (t == Type::TOP)
2237     return t;
2238 
2239   return t->make_narrowklass();
2240 }
2241 
2242 //------------------------------Identity---------------------------------------
2243 // To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
2244 // Also feed through the klass in Allocate(...klass...)._klass.
2245 Node* LoadNKlassNode::Identity(PhaseGVN* phase) {
2246   Node *x = klass_identity_common(phase);
2247 
2248   const Type *t = phase->type( x );
2249   if( t == Type::TOP ) return x;
2250   if( t->isa_narrowklass()) return x;
2251   assert (!t->isa_narrowoop(), "no narrow oop here");
2252 
2253   return phase->transform(new EncodePKlassNode(x, t->make_narrowklass()));
2254 }
2255 
2256 //------------------------------Value-----------------------------------------
2257 const Type* LoadRangeNode::Value(PhaseGVN* phase) const {
2258   // Either input is TOP ==> the result is TOP
2259   const Type *t1 = phase->type( in(MemNode::Memory) );
2260   if( t1 == Type::TOP ) return Type::TOP;
2261   Node *adr = in(MemNode::Address);
2262   const Type *t2 = phase->type( adr );
2263   if( t2 == Type::TOP ) return Type::TOP;
2264   const TypePtr *tp = t2->is_ptr();
2265   if (TypePtr::above_centerline(tp->ptr()))  return Type::TOP;
2266   const TypeAryPtr *tap = tp->isa_aryptr();
2267   if( !tap ) return _type;
2268   return tap->size();
2269 }
2270 
2271 //-------------------------------Ideal---------------------------------------
2272 // Feed through the length in AllocateArray(...length...)._length.
2273 Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2274   Node* p = MemNode::Ideal_common(phase, can_reshape);
2275   if (p)  return (p == NodeSentinel) ? NULL : p;
2276 
2277   // Take apart the address into an oop and an offset.


2285 
2286   // We can fetch the length directly through an AllocateArrayNode.
2287   // This works even if the length is not constant (clone or newArray).
2288   if (offset == arrayOopDesc::length_offset_in_bytes()) {
2289     AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
2290     if (alloc != NULL) {
2291       Node* allocated_length = alloc->Ideal_length();
2292       Node* len = alloc->make_ideal_length(tary, phase);
2293       if (allocated_length != len) {
2294         // New CastII improves on this.
2295         return len;
2296       }
2297     }
2298   }
2299 
2300   return NULL;
2301 }
2302 
2303 //------------------------------Identity---------------------------------------
2304 // Feed through the length in AllocateArray(...length...)._length.
2305 Node* LoadRangeNode::Identity(PhaseGVN* phase) {
2306   Node* x = LoadINode::Identity(phase);
2307   if (x != this)  return x;
2308 
2309   // Take apart the address into an oop and an offset.
2310   // Return 'this' if we cannot.
2311   Node*    adr    = in(MemNode::Address);
2312   intptr_t offset = 0;
2313   Node*    base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
2314   if (base == NULL)     return this;
2315   const TypeAryPtr* tary = phase->type(adr)->isa_aryptr();
2316   if (tary == NULL)     return this;
2317 
2318   // We can fetch the length directly through an AllocateArrayNode.
2319   // This works even if the length is not constant (clone or newArray).
2320   if (offset == arrayOopDesc::length_offset_in_bytes()) {
2321     AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase);
2322     if (alloc != NULL) {
2323       Node* allocated_length = alloc->Ideal_length();
2324       // Do not allow make_ideal_length to allocate a CastII node.
2325       Node* len = alloc->make_ideal_length(tary, phase, false);


2456   if (ReduceFieldZeroing && /*can_reshape &&*/
2457       mem->is_Proj() && mem->in(0)->is_Initialize()) {
2458     InitializeNode* init = mem->in(0)->as_Initialize();
2459     intptr_t offset = init->can_capture_store(this, phase, can_reshape);
2460     if (offset > 0) {
2461       Node* moved = init->capture_store(this, offset, phase, can_reshape);
2462       // If the InitializeNode captured me, it made a raw copy of me,
2463       // and I need to disappear.
2464       if (moved != NULL) {
2465         // %%% hack to ensure that Ideal returns a new node:
2466         mem = MergeMemNode::make(mem);
2467         return mem;             // fold me away
2468       }
2469     }
2470   }
2471 
2472   return NULL;                  // No further progress
2473 }
2474 
2475 //------------------------------Value-----------------------------------------
2476 const Type* StoreNode::Value(PhaseGVN* phase) const {
2477   // Either input is TOP ==> the result is TOP
2478   const Type *t1 = phase->type( in(MemNode::Memory) );
2479   if( t1 == Type::TOP ) return Type::TOP;
2480   const Type *t2 = phase->type( in(MemNode::Address) );
2481   if( t2 == Type::TOP ) return Type::TOP;
2482   const Type *t3 = phase->type( in(MemNode::ValueIn) );
2483   if( t3 == Type::TOP ) return Type::TOP;
2484   return Type::MEMORY;
2485 }
2486 
2487 //------------------------------Identity---------------------------------------
2488 // Remove redundant stores:
2489 //   Store(m, p, Load(m, p)) changes to m.
2490 //   Store(, p, x) -> Store(m, p, x) changes to Store(m, p, x).
2491 Node* StoreNode::Identity(PhaseGVN* phase) {
2492   Node* mem = in(MemNode::Memory);
2493   Node* adr = in(MemNode::Address);
2494   Node* val = in(MemNode::ValueIn);
2495 
2496   // Load then Store?  Then the Store is useless
2497   if (val->is_Load() &&
2498       val->in(MemNode::Address)->eqv_uncast(adr) &&
2499       val->in(MemNode::Memory )->eqv_uncast(mem) &&
2500       val->as_Load()->store_Opcode() == Opcode()) {
2501     return mem;
2502   }
2503 
2504   // Two stores in a row of the same value?
2505   if (mem->is_Store() &&
2506       mem->in(MemNode::Address)->eqv_uncast(adr) &&
2507       mem->in(MemNode::ValueIn)->eqv_uncast(val) &&
2508       mem->Opcode() == Opcode()) {
2509     return mem;
2510   }
2511 


2625   return StoreNode::Ideal(phase, can_reshape);
2626 }
2627 
2628 //=============================================================================
2629 //------------------------------Ideal------------------------------------------
2630 // If the store is from an AND mask that leaves the low bits untouched, then
2631 // we can skip the AND operation
2632 Node *StoreCNode::Ideal(PhaseGVN *phase, bool can_reshape){
2633   Node *progress = StoreNode::Ideal_masked_input(phase, 0xFFFF);
2634   if( progress != NULL ) return progress;
2635 
2636   progress = StoreNode::Ideal_sign_extended_input(phase, 16);
2637   if( progress != NULL ) return progress;
2638 
2639   // Finally check the default case
2640   return StoreNode::Ideal(phase, can_reshape);
2641 }
2642 
2643 //=============================================================================
2644 //------------------------------Identity---------------------------------------
2645 Node* StoreCMNode::Identity(PhaseGVN* phase) {
2646   // No need to card mark when storing a null ptr
2647   Node* my_store = in(MemNode::OopStore);
2648   if (my_store->is_Store()) {
2649     const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
2650     if( t1 == TypePtr::NULL_PTR ) {
2651       return in(MemNode::Memory);
2652     }
2653   }
2654   return this;
2655 }
2656 
//=============================================================================
//------------------------------Ideal---------------------------------------
Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
  // First let the generic store idealization run.
  Node* progress = StoreNode::Ideal(phase, can_reshape);
  if (progress != NULL) return progress;

  // If the oop-store edge is a MergeMem, bypass it and use the memory
  // slice for the oop alias category directly.
  Node* my_store = in(MemNode::OopStore);
  if (my_store->is_MergeMem()) {
    Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
    set_req(MemNode::OopStore, mem);
    return this;              // edge rewritten in place; report progress
  }

  return NULL;
}
2672 
2673 //------------------------------Value-----------------------------------------
2674 const Type* StoreCMNode::Value(PhaseGVN* phase) const {
2675   // Either input is TOP ==> the result is TOP
2676   const Type *t = phase->type( in(MemNode::Memory) );
2677   if( t == Type::TOP ) return Type::TOP;
2678   t = phase->type( in(MemNode::Address) );
2679   if( t == Type::TOP ) return Type::TOP;
2680   t = phase->type( in(MemNode::ValueIn) );
2681   if( t == Type::TOP ) return Type::TOP;
2682   // If extra input is TOP ==> the result is TOP
2683   t = phase->type( in(MemNode::OopStore) );
2684   if( t == Type::TOP ) return Type::TOP;
2685 
2686   return StoreNode::Value( phase );
2687 }
2688 
2689 
//=============================================================================
//----------------------------------SCMemProjNode------------------------------
// The memory projection of a load-store node simply reports the node's
// statically-known bottom type.
const Type* SCMemProjNode::Value(PhaseGVN* phase) const
{
  return bottom_type();
}
2696 
//=============================================================================
//----------------------------------LoadStoreNode------------------------------
// Constructor: wires up the control, memory, address and value inputs.
// 'rt' is the result type, 'at' the alias category, and 'required' the total
// input count reserved for the node (subclasses may add further edges).
LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
  : Node(required),
    _type(rt),
    _adr_type(at)
{
  init_req(MemNode::Control, c  );
  init_req(MemNode::Memory , mem);
  init_req(MemNode::Address, adr);
  init_req(MemNode::ValueIn, val);
  init_class_id(Class_LoadStore);
}
2710 
2711 uint LoadStoreNode::ideal_reg() const {
2712   return _type->ideal_reg();


// Constructor: a conditional load-store additionally carries an 'expected'
// input (the ExpectedIn edge); its result type is BOOL, with 5 inputs total.
LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) {
  init_req(ExpectedIn, ex );
}
2731 
2732 //=============================================================================
2733 //-------------------------------adr_type--------------------------------------
2734 const TypePtr* ClearArrayNode::adr_type() const {
2735   Node *adr = in(3);
2736   if (adr == NULL)  return NULL; // node is dead
2737   return MemNode::calculate_adr_type(adr->bottom_type());
2738 }
2739 
//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not?  Do not match memory
uint ClearArrayNode::match_edge(uint idx) const {
  return idx > 1;  // skip edges 0 and 1 (control and memory); match the rest
}
2745 
2746 //------------------------------Identity---------------------------------------
2747 // Clearing a zero length array does nothing
2748 Node* ClearArrayNode::Identity(PhaseGVN* phase) {
2749   return phase->type(in(2))->higher_equal(TypeX::ZERO)  ? in(1) : this;
2750 }
2751 
2752 //------------------------------Idealize---------------------------------------
2753 // Clearing a short array is faster with stores
2754 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
2755   const int unit = BytesPerLong;
2756   const TypeX* t = phase->type(in(2))->isa_intptr_t();
2757   if (!t)  return NULL;
2758   if (!t->is_con())  return NULL;
2759   intptr_t raw_count = t->get_con();
2760   intptr_t size = raw_count;
2761   if (!Matcher::init_array_count_is_in_bytes) size *= unit;
2762   // Clearing nothing uses the Identity call.
2763   // Negative clears are possible on dead ClearArrays
2764   // (see jck test stmt114.stmt11402.val).
2765   if (size <= 0 || size % unit != 0)  return NULL;
2766   intptr_t count = size / unit;
2767   // Length too long; use fast hardware clear
2768   if (size > Matcher::init_array_short_size)  return NULL;


2984       if ((alloc != NULL) && alloc->is_Allocate() &&
2985           alloc->as_Allocate()->does_not_escape_thread()) {
2986         // The allocated object does not escape.
2987         eliminate = true;
2988       }
2989     }
2990     if (eliminate) {
2991       // Replace MemBar projections by its inputs.
2992       PhaseIterGVN* igvn = phase->is_IterGVN();
2993       igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
2994       igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
2995       // Must return either the original node (now dead) or a new node
2996       // (Do not return a top here, since that would break the uniqueness of top.)
2997       return new ConINode(TypeInt::ZERO);
2998     }
2999   }
3000   return progress ? this : NULL;
3001 }
3002 
3003 //------------------------------Value------------------------------------------
3004 const Type* MemBarNode::Value(PhaseGVN* phase) const {
3005   if( !in(0) ) return Type::TOP;
3006   if( phase->type(in(0)) == Type::TOP )
3007     return Type::TOP;
3008   return TypeTuple::MEMBAR;
3009 }
3010 
//------------------------------match------------------------------------------
// Construct projections for memory.
Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::Memory:
    // Both control and memory projections become unmatched machine
    // projections with an empty register mask.
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  }
  // A MemBar only ever produces control and memory projections.
  ShouldNotReachHere();
  return NULL;
}
3022 
3023 //===========================InitializeNode====================================
3024 // SUMMARY:


4126     }
4127     assert(base_memory() == mdef->base_memory(), "");
4128   } else {
4129     set_base_memory(new_base);
4130   }
4131 }
4132 
// Make a new, untransformed MergeMem with the same base as 'mem'.
// If mem is itself a MergeMem, populate the result with the same edges.
MergeMemNode* MergeMemNode::make(Node* mem) {
  return new MergeMemNode(mem);  // constructor handles the edge population
}
4138 
//------------------------------cmp--------------------------------------------
// MergeMems are never value-numbered together: hashing is disabled (NO_HASH)
// and comparison succeeds only on object identity.
uint MergeMemNode::hash() const { return NO_HASH; }
uint MergeMemNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}
4144 
4145 //------------------------------Identity---------------------------------------
4146 Node* MergeMemNode::Identity(PhaseGVN* phase) {
4147   // Identity if this merge point does not record any interesting memory
4148   // disambiguations.
4149   Node* base_mem = base_memory();
4150   Node* empty_mem = empty_memory();
4151   if (base_mem != empty_mem) {  // Memory path is not dead?
4152     for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
4153       Node* mem = in(i);
4154       if (mem != empty_mem && mem != base_mem) {
4155         return this;            // Many memory splits; no change
4156       }
4157     }
4158   }
4159   return base_mem;              // No memory splits; ID on the one true input
4160 }
4161 
4162 //------------------------------Ideal------------------------------------------
4163 // This method is invoked recursively on chains of MergeMem nodes
4164 Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) {
4165   // Remove chain'd MergeMems
4166   //


< prev index next >