#endif

//----------------------------LoadNode::make-----------------------------------
// Polymorphic factory method:
Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) {
  Compile* C = gvn.C;

  // sanity check the alias category against the created node type
  assert(!(adr_type->isa_oopptr() &&
           adr_type->offset() == oopDesc::klass_offset_in_bytes()),
         "use LoadKlassNode instead");
  assert(!(adr_type->isa_aryptr() &&
           adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
         "use LoadRangeNode instead");
  // Check control edge of raw loads
  assert(ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
         // oop will be recorded in oop map if load crosses safepoint
         rt->isa_oopptr() || is_immutable_value(adr),
         "raw memory operations should have control edge");
  switch (bt) {
  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo);
  case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
  case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
  case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo);
  case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
  case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo);
  case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt,            mo);
  case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt,            mo);
  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo);
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo));
      return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
    } else
#endif
    {
      assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo);
    }
  }
  ShouldNotReachHere();
  return (LoadNode*)NULL;
}
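// Illustrative usage sketch (not part of this file): a typical call site
// builds the load through this factory and lets GVN fold it, e.g. for an
// int field (names here are assumptions, not quoted code):
//   Node* ld = gvn.transform(
//       LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT,
//                      MemNode::unordered));
// Note that the T_OBJECT case above may answer a DecodeN(LoadN) pair rather
// than a plain LoadP, so callers must not assume the result is a LoadNode.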

LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
  bool require_atomic = true;
  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
}

LoadDNode* LoadDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
  bool require_atomic = true;
  return new (C) LoadDNode(ctl, mem, adr, adr_type, rt, mo, require_atomic);
}



//------------------------------hash-------------------------------------------
uint LoadNode::hash() const {
  // unroll addition of interesting fields
  return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
}

static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
    bool is_stable_ary = FoldStableValues &&
                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
                         tp->isa_aryptr()->is_stable();

    return (eliminate_boxing && non_volatile) || is_stable_ary;
  }

// ... [lines 964-1210 elided] ...
          if (ik->nof_nonstatic_fields() == 1) {
            // This should be true; nonstatic_field_at requires calling
            // nof_nonstatic_fields, so check it anyway.
            ciConstant c = box->field_value(ik->nonstatic_field_at(0));
            BasicType bt = c.basic_type();
            // Only integer types have boxing cache.
            assert(bt == T_BOOLEAN || bt == T_CHAR  ||
                   bt == T_BYTE    || bt == T_SHORT ||
                   bt == T_INT     || bt == T_LONG, err_msg_res("wrong type = %s", type2name(bt)));
            jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
            if (cache_low != (int)cache_low) {
              return NULL; // should not happen since cache is array indexed by value
            }
            jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
            if (offset != (int)offset) {
              return NULL; // should not happen since cache is array indexed by value
            }
            // Add up all the offsets making up the address of the load.
            Node* result = elements[0];
            for (int i = 1; i < count; i++) {
              result = phase->transform(new (phase->C) AddXNode(result, elements[i]));
            }
            // Remove the constant offset from the address and then
            result = phase->transform(new (phase->C) AddXNode(result, phase->MakeConX(-(int)offset)));
            // remove the scaling of the offset to recover the original index.
            if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
              // Peel the shift off directly but wrap it in a dummy node
              // since Ideal can't return existing nodes.
              result = new (phase->C) RShiftXNode(result->in(1), phase->intcon(0));
            } else if (result->is_Add() && result->in(2)->is_Con() &&
                       result->in(1)->Opcode() == Op_LShiftX &&
                       result->in(1)->in(2) == phase->intcon(shift)) {
              // We can't do the general optimization ((X<<Z) + Y) >> Z ==> X + (Y>>Z),
              // but for boxing cache access we know that X<<Z will not overflow
              // (there is a range check), so we do this optimization by hand here.
              Node* add_con = new (phase->C) RShiftXNode(result->in(2), phase->intcon(shift));
              result = new (phase->C) AddXNode(result->in(1)->in(1), phase->transform(add_con));
            } else {
              result = new (phase->C) RShiftXNode(result, phase->intcon(shift));
            }
#ifdef _LP64
            if (bt != T_LONG) {
              result = new (phase->C) ConvL2INode(phase->transform(result));
            }
#else
            if (bt == T_LONG) {
              result = new (phase->C) ConvI2LNode(phase->transform(result));
            }
#endif
            return result;
          }
        }
      }
    }
  }
  return NULL;
}
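// Worked example (illustrative): for the Integer cache, cache_low == -128 and
// a boxed value v lives at array index (v - cache_low), i.e. at address
// offset base_offset + ((v - cache_low) << shift). Since the 'offset'
// computed above is base_offset - (cache_low << shift), subtracting it from
// the summed address offsets leaves (v << shift), and the final right shift
// recovers the original boxed value v.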

static bool stable_phi(PhiNode* phi, PhaseGVN *phase) {
  Node* region = phi->in(0);
  if (region == NULL) {
    return false; // Wait for a stable graph
  }
  uint cnt = phi->req();
  for (uint i = 1; i < cnt; i++) {
    Node* rc = region->in(i);
    if (rc == NULL || phase->type(rc) == Type::TOP)
// ... [lines 1278-1367 elided] ...
      region = base->in(0);
    } else if (MemNode::all_controls_dominate(address, mem->in(0))) {
      region = mem->in(0);
    } else {
      return NULL; // complex graph
    }
  } else {
    assert(base->in(0) == mem->in(0), "sanity");
    region = mem->in(0);
  }

  const Type* this_type = this->bottom_type();
  int this_index  = C->get_alias_index(t_oop);
  int this_offset = t_oop->offset();
  int this_iid    = t_oop->instance_id();
  if (!t_oop->is_known_instance() && load_boxed_values) {
    // Use _idx of address base for boxed values.
    this_iid = base->_idx;
  }
  PhaseIterGVN* igvn = phase->is_IterGVN();
  Node* phi = new (C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
  for (uint i = 1; i < region->req(); i++) {
    Node* x;
    Node* the_clone = NULL;
    if (region->in(i) == C->top()) {
      x = C->top();       // Dead path?  Use a dead data op
    } else {
      x = this->clone();  // Else clone up the data op
      the_clone = x;      // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (this->in(0) == region) {
        x->set_req(0, region->in(i));
      } else {
        x->set_req(0, NULL);
      }
      if (mem->is_Phi() && (mem->in(0) == region)) {
        x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone.
      }
      if (address->is_Phi() && address->in(0) == region) {
        x->set_req(Address, address->in(i)); // Use pre-Phi input for the clone
      }
      if (base_is_phi && (base->in(0) == region)) {
        Node* base_x = base->in(i); // Clone address for loads from boxed objects.
        Node* adr_x = phase->transform(new (C) AddPNode(base_x, base_x, address->in(AddPNode::Offset)));
        x->set_req(Address, adr_x);
      }
    }
    // Check for a 'win' on some paths
    const Type *t = x->Value(igvn);

    bool singleton = t->singleton();

    // See comments in PhaseIdealLoop::split_thru_phi().
    if (singleton && t == Type::TOP) {
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      x = igvn->makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
// ... [lines 1432-1879 elided] ...
  return _type;
}

//------------------------------match_edge-------------------------------------
// Do we Match on this edge index or not?  Match only the address.
uint LoadNode::match_edge(uint idx) const {
  return idx == MemNode::Address;
}

//--------------------------LoadBNode::Ideal--------------------------------------
//
// If the previous store is to the same address as this load,
// and the value stored was larger than a byte, replace this load
// with the value stored truncated to a byte.  If no truncation is
// needed, the replacement is done in LoadNode::Identity().
//
Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if (value && !phase->type(value)->higher_equal(_type)) {
    Node* result = phase->transform(new (phase->C) LShiftINode(value, phase->intcon(24)));
    return new (phase->C) RShiftINode(result, phase->intcon(24));
  }
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadBNode::Value(PhaseTransform *phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make((con << 24) >> 24);
  }
  return LoadNode::Value(phase);
}
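// For example (arithmetic sketch): if the captured store wrote the int 0x1FF,
// the byte reload folds to (0x1FF << 24) >> 24 == -1, matching Java's
// narrowing conversion from int to byte.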

//--------------------------LoadUBNode::Ideal-------------------------------------
//
// If the previous store is to the same address as this load,
// and the value stored was larger than a byte, replace this load
// with the value stored truncated to a byte.  If no truncation is
// needed, the replacement is done in LoadNode::Identity().
//
Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if (value && !phase->type(value)->higher_equal(_type))
    return new (phase->C) AndINode(value, phase->intcon(0xFF));
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadUBNode::Value(PhaseTransform *phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make(con & 0xFF);
  }
  return LoadNode::Value(phase);
}

//--------------------------LoadUSNode::Ideal-------------------------------------
//
// If the previous store is to the same address as this load,
// and the value stored was larger than a char, replace this load
// with the value stored truncated to a char.  If no truncation is
// needed, the replacement is done in LoadNode::Identity().
//
Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if (value && !phase->type(value)->higher_equal(_type))
    return new (phase->C) AndINode(value, phase->intcon(0xFFFF));
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadUSNode::Value(PhaseTransform *phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make(con & 0xFFFF);
  }
  return LoadNode::Value(phase);
}
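// For example (arithmetic sketch): a stored int 0x1FFFF reloaded as a char
// folds to 0x1FFFF & 0xFFFF == 0xFFFF (65535), i.e. zero-extension rather
// than the sign-extension used for byte and short.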

//--------------------------LoadSNode::Ideal--------------------------------------
//
// If the previous store is to the same address as this load,
// and the value stored was larger than a short, replace this load
// with the value stored truncated to a short.  If no truncation is
// needed, the replacement is done in LoadNode::Identity().
//
Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if (value && !phase->type(value)->higher_equal(_type)) {
    Node* result = phase->transform(new (phase->C) LShiftINode(value, phase->intcon(16)));
    return new (phase->C) RShiftINode(result, phase->intcon(16));
  }
  // Identity call will handle the case where truncation is not needed.
  return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadSNode::Value(PhaseTransform *phase) const {
  Node* mem = in(MemNode::Memory);
  Node* value = can_see_stored_value(mem, phase);
  if (value != NULL && value->is_Con() &&
      !value->bottom_type()->higher_equal(_type)) {
    // If the input to the store does not fit with the load's result type,
    // it must be truncated. We can't delay until Ideal call since
    // a singleton Value is needed for split_thru_phi optimization.
    int con = value->get_int();
    return TypeInt::make((con << 16) >> 16);
  }
  return LoadNode::Value(phase);
}

//=============================================================================
//----------------------------LoadKlassNode::make------------------------------
// Polymorphic factory method:
Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
  Compile* C = gvn.C;
  Node *ctl = NULL;
  // sanity check the alias category against the created node type
  const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
  assert(adr_type != NULL, "expecting TypeKlassPtr");
#ifdef _LP64
  if (adr_type->is_ptr_to_narrowklass()) {
    assert(UseCompressedClassPointers, "no compressed klasses");
    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
    return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
  }
#endif
  assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
  return new (C) LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
}

//------------------------------Value------------------------------------------
const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
  return klass_value_common(phase);
}

const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP
  const Type *t1 = phase->type( in(MemNode::Memory) );
  if (t1 == Type::TOP)  return Type::TOP;
  Node *adr = in(MemNode::Address);
  const Type *t2 = phase->type( adr );
  if (t2 == Type::TOP)  return Type::TOP;
  const TypePtr *tp = t2->is_ptr();
  if (TypePtr::above_centerline(tp->ptr()) ||
      tp->ptr() == TypePtr::Null)  return Type::TOP;

  // Return a more precise klass, if possible
  const TypeInstPtr *tinst = tp->isa_instptr();
// ... [lines 2051-2237 elided] ...
//------------------------------Value------------------------------------------
const Type *LoadNKlassNode::Value( PhaseTransform *phase ) const {
  const Type *t = klass_value_common(phase);
  if (t == Type::TOP)
    return t;

  return t->make_narrowklass();
}

//------------------------------Identity---------------------------------------
// To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
// Also feed through the klass in Allocate(...klass...)._klass.
Node* LoadNKlassNode::Identity( PhaseTransform *phase ) {
  Node *x = klass_identity_common(phase);

  const Type *t = phase->type( x );
  if (t == Type::TOP)        return x;
  if (t->isa_narrowklass())  return x;
  assert(!t->isa_narrowoop(), "no narrow oop here");

  return phase->transform(new (phase->C) EncodePKlassNode(x, t->make_narrowklass()));
}

//------------------------------Value-----------------------------------------
const Type *LoadRangeNode::Value( PhaseTransform *phase ) const {
  // Either input is TOP ==> the result is TOP
  const Type *t1 = phase->type( in(MemNode::Memory) );
  if (t1 == Type::TOP)  return Type::TOP;
  Node *adr = in(MemNode::Address);
  const Type *t2 = phase->type( adr );
  if (t2 == Type::TOP)  return Type::TOP;
  const TypePtr *tp = t2->is_ptr();
  if (TypePtr::above_centerline(tp->ptr()))  return Type::TOP;
  const TypeAryPtr *tap = tp->isa_aryptr();
  if (!tap)  return _type;
  return tap->size();
}

//-------------------------------Ideal---------------------------------------
// Feed through the length in AllocateArray(...length...)._length.
Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// ... [lines 2279-2332 elided] ...
        return allocated_length;
      }
    }
  }

  return this;

}

//=============================================================================
//---------------------------StoreNode::make-----------------------------------
// Polymorphic factory method:
StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
  assert((mo == unordered || mo == release), "unexpected");
  Compile* C = gvn.C;
  assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
         ctl != NULL, "raw memory operations should have control edge");

  switch (bt) {
  case T_BOOLEAN:
  case T_BYTE:    return new (C) StoreBNode(ctl, mem, adr, adr_type, val, mo);
  case T_INT:     return new (C) StoreINode(ctl, mem, adr, adr_type, val, mo);
  case T_CHAR:
  case T_SHORT:   return new (C) StoreCNode(ctl, mem, adr, adr_type, val, mo);
  case T_LONG:    return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo);
  case T_FLOAT:   return new (C) StoreFNode(ctl, mem, adr, adr_type, val, mo);
  case T_DOUBLE:  return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo);
  case T_METADATA:
  case T_ADDRESS:
  case T_OBJECT:
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
      return new (C) StoreNNode(ctl, mem, adr, adr_type, val, mo);
    } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                adr->bottom_type()->isa_rawptr())) {
      val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
    }
#endif
    {
      return new (C) StorePNode(ctl, mem, adr, adr_type, val, mo);
    }
  }
  ShouldNotReachHere();
  return (StoreNode*)NULL;
}
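// Illustrative usage sketch (not part of this file), mirroring LoadNode::make;
// e.g. a volatile int write would typically request release semantics
// (names here are assumptions, not quoted code):
//   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val,
//                                   T_INT, MemNode::release);
//   mem = gvn.transform(st);
// On LP64 the T_OBJECT case may encode 'val' and answer a StoreN or
// StoreNKlass instead of a StoreP.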

StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
  bool require_atomic = true;
  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
}

StoreDNode* StoreDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
  bool require_atomic = true;
  return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
}


//--------------------------bottom_type----------------------------------------
const Type *StoreNode::bottom_type() const {
  return Type::MEMORY;
}

//------------------------------hash-------------------------------------------
uint StoreNode::hash() const {
  // unroll addition of interesting fields
  //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);

  // Since they are not commoned, do not hash them:
  return NO_HASH;
}

//------------------------------Ideal------------------------------------------
// Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x).
// When a store immediately follows a relevant allocation/initialization,
// ... [lines 2410-2761 elided] ...
  // (see jck test stmt114.stmt11402.val).
  if (size <= 0 || size % unit != 0)  return NULL;
  intptr_t count = size / unit;
  // Length too long; use fast hardware clear
  if (size > Matcher::init_array_short_size)  return NULL;
  Node *mem = in(1);
  if (phase->type(mem) == Type::TOP)  return NULL;
  Node *adr = in(3);
  const Type* at = phase->type(adr);
  if (at == Type::TOP)  return NULL;
  const TypePtr* atp = at->isa_ptr();
  // adjust atp to be the correct array element address type
  if (atp == NULL)  atp = TypePtr::BOTTOM;
  else              atp = atp->add_offset(Type::OffsetBot);
  // Get base for derived pointer purposes
  if (adr->Opcode() != Op_AddP)  Unimplemented();
  Node *base = adr->in(1);

  Node *zero = phase->makecon(TypeLong::ZERO);
  Node *off  = phase->MakeConX(BytesPerLong);
  mem = new (phase->C) StoreLNode(in(0), mem, adr, atp, zero, MemNode::unordered, false);
  count--;
  while (count--) {
    mem = phase->transform(mem);
    adr = phase->transform(new (phase->C) AddPNode(base, adr, off));
    mem = new (phase->C) StoreLNode(in(0), mem, adr, atp, zero, MemNode::unordered, false);
  }
  return mem;
}
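// Worked example (illustrative): clearing 16 bytes with unit == BytesPerLong
// gives count == 2. The code above emits the first StoreL at 'adr', then the
// loop emits one more StoreL at adr + 8, each store chained through the
// previous store's memory state.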

//----------------------------step_through----------------------------------
// Advance *np past this ClearArray to its allocation's input memory edge if
// the allocation initializes a different instance; return false if the
// allocation is the very instance we are looking for.
bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
  Node* n = *np;
  assert(n->is_ClearArray(), "sanity");
  intptr_t offset;
  AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
  // This method is called only before Allocate nodes are expanded during
  // macro nodes expansion. Before that, ClearArray nodes are only generated
  // in LibraryCallKit::generate_arraycopy(), which follows allocations.
  assert(alloc != NULL, "should have allocation");
  if (alloc->_idx == instance_id) {
    // Can not bypass initialization of the instance we are looking for.
    return false;
  }
  // Otherwise skip it.
  InitializeNode* init = alloc->initialization();
  if (init != NULL)
    *np = init->in(TypeFunc::Memory);
  else
    *np = alloc->in(TypeFunc::Memory);
  return true;
}
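// Illustrative caller sketch (hypothetical shape, not quoted from this file):
// walking a memory chain past ClearArrays that initialize other instances:
//   while (mem->is_ClearArray() &&
//          ClearArrayNode::step_through(&mem, instance_id, phase)) {
//     // 'mem' now points past the skipped allocation's initialization
//   }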

//----------------------------clear_memory-------------------------------------
// Generate code to initialize object storage to zero.
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   intptr_t start_offset,
                                   Node* end_offset,
                                   PhaseGVN* phase) {
  Compile* C = phase->C;
  intptr_t offset = start_offset;

  int unit = BytesPerLong;
  if ((offset % unit) != 0) {
    Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
    mem = phase->transform(mem);
    offset += BytesPerInt;
  }
  assert((offset % unit) == 0, "");

  // Initialize the remaining stuff, if any, with a ClearArray.
  return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
}

Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   Node* start_offset,
                                   Node* end_offset,
                                   PhaseGVN* phase) {
  if (start_offset == end_offset) {
    // nothing to do
    return mem;
  }

  Compile* C = phase->C;
  int unit = BytesPerLong;
  Node* zbase = start_offset;
  Node* zend  = end_offset;

  // Scale to the unit required by the CPU:
  if (!Matcher::init_array_count_is_in_bytes) {
    Node* shift = phase->intcon(exact_log2(unit));
    zbase = phase->transform( new(C) URShiftXNode(zbase, shift) );
    zend  = phase->transform( new(C) URShiftXNode(zend,  shift) );
  }

  // Bulk clear double-words
  Node* zsize = phase->transform( new(C) SubXNode(zend, zbase) );
  Node* adr   = phase->transform( new(C) AddPNode(dest, dest, start_offset) );
  mem = new (C) ClearArrayNode(ctl, mem, zsize, adr);
  return phase->transform(mem);
}

Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
                                   intptr_t start_offset,
                                   intptr_t end_offset,
                                   PhaseGVN* phase) {
  if (start_offset == end_offset) {
    // nothing to do
    return mem;
  }

  Compile* C = phase->C;
  assert((end_offset % BytesPerInt) == 0, "odd end offset");
  intptr_t done_offset = end_offset;
  if ((done_offset % BytesPerLong) != 0) {
    done_offset -= BytesPerInt;
  }
  if (done_offset > start_offset) {
    mem = clear_memory(ctl, mem, dest,
                       start_offset, phase->MakeConX(done_offset), phase);
  }
  if (done_offset < end_offset) { // emit the final 32-bit store
    Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
    adr = phase->transform(adr);
    const TypePtr* atp = TypeRawPtr::BOTTOM;
    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
    mem = phase->transform(mem);
    done_offset += BytesPerInt;
  }
  assert(done_offset == end_offset, "");
  return mem;
}
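// Worked example (illustrative): start_offset == 8, end_offset == 20.
// done_offset is rounded down to 16 (20 is not 8-byte aligned), so [8,16)
// is cleared with 64-bit stores and a final 32-bit store zeroes the tail at
// offset 16, after which done_offset == end_offset == 20.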

//=============================================================================
MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
  : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
    _adr_type(C->get_adr_type(alias_idx))
{
  init_class_id(Class_MemBar);
  Node* top = C->top();
  init_req(TypeFunc::I_O, top);
  init_req(TypeFunc::FramePtr, top);
  init_req(TypeFunc::ReturnAdr, top);
  if (precedent != NULL)
    init_req(TypeFunc::Parms, precedent);
}

//------------------------------cmp--------------------------------------------
uint MemBarNode::hash() const { return NO_HASH; }
uint MemBarNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//------------------------------make-------------------------------------------
MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
  switch (opcode) {
  case Op_MemBarAcquire:     return new(C) MemBarAcquireNode(C, atp, pn);
  case Op_LoadFence:         return new(C) LoadFenceNode(C, atp, pn);
  case Op_MemBarRelease:     return new(C) MemBarReleaseNode(C, atp, pn);
  case Op_StoreFence:        return new(C) StoreFenceNode(C, atp, pn);
  case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn);
  case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn);
  case Op_MemBarVolatile:    return new(C) MemBarVolatileNode(C, atp, pn);
  case Op_MemBarCPUOrder:    return new(C) MemBarCPUOrderNode(C, atp, pn);
  case Op_Initialize:        return new(C) InitializeNode(C, atp, pn);
  case Op_MemBarStoreStore:  return new(C) MemBarStoreStoreNode(C, atp, pn);
  default: ShouldNotReachHere(); return NULL;
  }
}
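// Illustrative usage sketch (not part of this file): barriers are typically
// created through this factory and transformed, e.g. for a volatile-store
// fence (argument choices here are assumptions):
//   MemBarNode* mb = MemBarNode::make(C, Op_MemBarVolatile,
//                                     Compile::AliasIdxBot, NULL);
// Passing pn == NULL is fine: as the constructor above shows, the precedent
// edge is only wired in when one is supplied.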

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.  Strip out
// control copies.
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top()) {
    return NULL;
  }

  // Eliminate volatile MemBars for scalar replaced objects.
  if (can_reshape && req() == (Precedent+1)) {
    bool eliminate = false;
    int opc = Opcode();
    if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
      // Volatile field loads and stores.
// ... [lines 2953-2974 elided] ...
            t_oop->offset() != Type::OffsetTop) {
          eliminate = true;
        }
      }
    } else if (opc == Op_MemBarRelease) {
      // Final field stores.
      Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
      if ((alloc != NULL) && alloc->is_Allocate() &&
          alloc->as_Allocate()->_is_non_escaping) {
        // The allocated object does not escape.
        eliminate = true;
      }
    }
    if (eliminate) {
      // Replace the MemBar's projections with its inputs.
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
      igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
      // Must return either the original node (now dead) or a new node.
      // (Do not return a top here, since that would break the uniqueness of top.)
      return new (phase->C) ConINode(TypeInt::ZERO);
    }
  }
  return NULL;
}

//------------------------------Value------------------------------------------
const Type *MemBarNode::Value( PhaseTransform *phase ) const {
  if (!in(0))  return Type::TOP;
  if (phase->type(in(0)) == Type::TOP)
    return Type::TOP;
  return TypeTuple::MEMBAR;
}

//------------------------------match------------------------------------------
// Construct projections for memory.
Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::Memory:
    return new (m->C) MachProjNode(this, proj->_con, RegMask::Empty, MachProjNode::unmatched_proj);
  }
  ShouldNotReachHere();
  return NULL;
}

//===========================InitializeNode====================================
// SUMMARY:
// This node acts as a memory barrier on raw memory, after some raw stores.
// The 'cooked' oop value feeds from the Initialize, not the Allocation.
// The Initialize can 'capture' suitably constrained stores as raw inits.
// It can coalesce related raw stores into larger units (called 'tiles').
// It can avoid zeroing new storage for memory units which have raw inits.
// At macro-expansion, it is marked 'complete', and does not optimize further.
//
// EXAMPLE:
// The object 'new short[2]' occupies 16 bytes in a 32-bit machine.
//   ctl = incoming control; mem* = incoming memory
// (Note:  A star * on a memory edge denotes I/O and other standard edges.)
// First allocate uninitialized memory and fill in the header:
//   alloc = (Allocate ctl mem* 16 #short[].klass ...)
// ... [lines 3036-3420 elided] ...
                                          PhaseTransform* phase) {
  assert(stores_are_sane(phase), "");
  int i = captured_store_insertion_point(start, size_in_bytes, phase);
  if (i == 0) {
    return NULL;              // something is dead
  } else if (i < 0) {
    return zero_memory();     // just primordial zero bits here
  } else {
    Node* st = in(i);         // here is the store at this position
    assert(get_store_offset(st->as_Store(), phase) == start, "sanity");
    return st;
  }
}

// Create, as a raw pointer, an address within my new object at 'offset'.
Node* InitializeNode::make_raw_address(intptr_t offset,
                                       PhaseTransform* phase) {
  Node* addr = in(RawAddress);
  if (offset != 0) {
    Compile* C = phase->C;
    addr = phase->transform( new (C) AddPNode(C->top(), addr,
                                              phase->MakeConX(offset)) );
  }
  return addr;
}
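// For example (illustrative): with offset == 12 this produces
//   (AddP top rawoop #12)
// i.e. a pointer 12 bytes into the new object; using top() as the AddP base
// marks the result as a raw address rather than an oop-derived one.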

// Clone the given store, converting it into a raw store
// initializing a field or element of my new object.
// Caller is responsible for retiring the original store,
// with subsume_node or the like.
//
// From the example above InitializeNode::InitializeNode,
// here are the old stores to be captured:
//   store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
//   store2 = (StoreC init.Control store1      (+ oop 14) 2)
//
// Here is the changed code; note the extra edges on init:
//   alloc = (Allocate ...)
//   rawoop = alloc.RawAddress
//   rawstore1 = (StoreC alloc.Control alloc.Memory (+ rawoop 12) 1)
//   rawstore2 = (StoreC alloc.Control alloc.Memory (+ rawoop 14) 2)
// ... [lines 3462-4109 elided] ...
  for (uint i = Compile::AliasIdxTop; i < req(); i++) {
    init_req(i, empty_mem);
  }
  assert(empty_memory() == empty_mem, "");

  if (new_base != NULL && new_base->is_MergeMem()) {
    MergeMemNode* mdef = new_base->as_MergeMem();
    assert(mdef->empty_memory() == empty_mem, "consistent sentinels");
    for (MergeMemStream mms(this, mdef); mms.next_non_empty2(); ) {
      mms.set_memory(mms.memory2());
    }
    assert(base_memory() == mdef->base_memory(), "");
  } else {
    set_base_memory(new_base);
  }
}

// Make a new, untransformed MergeMem with the same base as 'mem'.
// If mem is itself a MergeMem, populate the result with the same edges.
MergeMemNode* MergeMemNode::make(Compile* C, Node* mem) {
  return new(C) MergeMemNode(mem);
}

//------------------------------cmp--------------------------------------------
uint MergeMemNode::hash() const { return NO_HASH; }
uint MergeMemNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//------------------------------Identity---------------------------------------
Node* MergeMemNode::Identity(PhaseTransform *phase) {
  // Identity if this merge point does not record any interesting memory
  // disambiguations.
  Node* base_mem = base_memory();
  Node* empty_mem = empty_memory();
  if (base_mem != empty_mem) {  // Memory path is not dead?
    for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
      Node* mem = in(i);
      if (mem != empty_mem && mem != base_mem) {
        return this;            // Many memory splits; no change
      }
|
891 #endif
892
893 //----------------------------LoadNode::make-----------------------------------
894 // Polymorphic factory method:
895 Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) {
896 Compile* C = gvn.C;
897
898 // sanity check the alias category against the created node type
899 assert(!(adr_type->isa_oopptr() &&
900 adr_type->offset() == oopDesc::klass_offset_in_bytes()),
901 "use LoadKlassNode instead");
902 assert(!(adr_type->isa_aryptr() &&
903 adr_type->offset() == arrayOopDesc::length_offset_in_bytes()),
904 "use LoadRangeNode instead");
905 // Check control edge of raw loads
906 assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
907 // oop will be recorded in oop map if load crosses safepoint
908 rt->isa_oopptr() || is_immutable_value(adr),
909 "raw memory operations should have control edge");
910 switch (bt) {
911 case T_BOOLEAN: return new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
912 case T_BYTE: return new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
913 case T_INT: return new LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo);
914 case T_CHAR: return new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
915 case T_SHORT: return new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
916 case T_LONG: return new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo);
917 case T_FLOAT: return new LoadFNode (ctl, mem, adr, adr_type, rt, mo);
918 case T_DOUBLE: return new LoadDNode (ctl, mem, adr, adr_type, rt, mo);
919 case T_ADDRESS: return new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo);
920 case T_OBJECT:
921 #ifdef _LP64
922 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
923 Node* load = gvn.transform(new LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo));
924 return new DecodeNNode(load, load->bottom_type()->make_ptr());
925 } else
926 #endif
927 {
928 assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
929 return new LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo);
930 }
931 }
932 ShouldNotReachHere();
933 return (LoadNode*)NULL;
934 }
935
936 LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
937 bool require_atomic = true;
938 return new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
939 }
940
941 LoadDNode* LoadDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
942 bool require_atomic = true;
943 return new LoadDNode(ctl, mem, adr, adr_type, rt, mo, require_atomic);
944 }
945
946
947
948 //------------------------------hash-------------------------------------------
949 uint LoadNode::hash() const {
950 // unroll addition of interesting fields
951 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
952 }
953
954 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
955 if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
956 bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
957 bool is_stable_ary = FoldStableValues &&
958 (tp != NULL) && (tp->isa_aryptr() != NULL) &&
959 tp->isa_aryptr()->is_stable();
960
961 return (eliminate_boxing && non_volatile) || is_stable_ary;
962 }
963
1211 if (ik->nof_nonstatic_fields() == 1) {
1212 // This should be true nonstatic_field_at requires calling
1213 // nof_nonstatic_fields so check it anyway
1214 ciConstant c = box->field_value(ik->nonstatic_field_at(0));
1215 BasicType bt = c.basic_type();
1216 // Only integer types have boxing cache.
1217 assert(bt == T_BOOLEAN || bt == T_CHAR ||
1218 bt == T_BYTE || bt == T_SHORT ||
1219 bt == T_INT || bt == T_LONG, err_msg_res("wrong type = %s", type2name(bt)));
1220 jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int();
1221 if (cache_low != (int)cache_low) {
1222 return NULL; // should not happen since cache is array indexed by value
1223 }
1224 jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift);
1225 if (offset != (int)offset) {
1226 return NULL; // should not happen since cache is array indexed by value
1227 }
1228 // Add up all the offsets making of the address of the load
1229 Node* result = elements[0];
1230 for (int i = 1; i < count; i++) {
1231 result = phase->transform(new AddXNode(result, elements[i]));
1232 }
1233 // Remove the constant offset from the address and then
1234 result = phase->transform(new AddXNode(result, phase->MakeConX(-(int)offset)));
1235 // remove the scaling of the offset to recover the original index.
1236 if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
1237 // Peel the shift off directly but wrap it in a dummy node
1238 // since Ideal can't return existing nodes
1239 result = new RShiftXNode(result->in(1), phase->intcon(0));
1240 } else if (result->is_Add() && result->in(2)->is_Con() &&
1241 result->in(1)->Opcode() == Op_LShiftX &&
1242 result->in(1)->in(2) == phase->intcon(shift)) {
1243 // We can't do general optimization: ((X<<Z) + Y) >> Z ==> X + (Y>>Z)
1244 // but for boxing cache access we know that X<<Z will not overflow
1245 // (there is range check) so we do this optimizatrion by hand here.
1246 Node* add_con = new RShiftXNode(result->in(2), phase->intcon(shift));
1247 result = new AddXNode(result->in(1)->in(1), phase->transform(add_con));
1248 } else {
1249 result = new RShiftXNode(result, phase->intcon(shift));
1250 }
1251 #ifdef _LP64
1252 if (bt != T_LONG) {
1253 result = new ConvL2INode(phase->transform(result));
1254 }
1255 #else
1256 if (bt == T_LONG) {
1257 result = new ConvI2LNode(phase->transform(result));
1258 }
1259 #endif
1260 return result;
1261 }
1262 }
1263 }
1264 }
1265 }
1266 return NULL;
1267 }
1268
1269 static bool stable_phi(PhiNode* phi, PhaseGVN *phase) {
1270 Node* region = phi->in(0);
1271 if (region == NULL) {
1272 return false; // Wait stable graph
1273 }
1274 uint cnt = phi->req();
1275 for (uint i = 1; i < cnt; i++) {
1276 Node* rc = region->in(i);
1277 if (rc == NULL || phase->type(rc) == Type::TOP)
1368 region = base->in(0);
1369 } else if (MemNode::all_controls_dominate(address, mem->in(0))) {
1370 region = mem->in(0);
1371 } else {
1372 return NULL; // complex graph
1373 }
1374 } else {
1375 assert(base->in(0) == mem->in(0), "sanity");
1376 region = mem->in(0);
1377 }
1378
1379 const Type* this_type = this->bottom_type();
1380 int this_index = C->get_alias_index(t_oop);
1381 int this_offset = t_oop->offset();
1382 int this_iid = t_oop->instance_id();
1383 if (!t_oop->is_known_instance() && load_boxed_values) {
1384 // Use _idx of address base for boxed values.
1385 this_iid = base->_idx;
1386 }
1387 PhaseIterGVN* igvn = phase->is_IterGVN();
1388 Node* phi = new PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
1389 for (uint i = 1; i < region->req(); i++) {
1390 Node* x;
1391 Node* the_clone = NULL;
1392 if (region->in(i) == C->top()) {
1393 x = C->top(); // Dead path? Use a dead data op
1394 } else {
1395 x = this->clone(); // Else clone up the data op
1396 the_clone = x; // Remember for possible deletion.
1397 // Alter data node to use pre-phi inputs
1398 if (this->in(0) == region) {
1399 x->set_req(0, region->in(i));
1400 } else {
1401 x->set_req(0, NULL);
1402 }
1403 if (mem->is_Phi() && (mem->in(0) == region)) {
1404 x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone.
1405 }
1406 if (address->is_Phi() && address->in(0) == region) {
1407 x->set_req(Address, address->in(i)); // Use pre-Phi input for the clone
1408 }
1409 if (base_is_phi && (base->in(0) == region)) {
1410 Node* base_x = base->in(i); // Clone address for loads from boxed objects.
1411 Node* adr_x = phase->transform(new AddPNode(base_x,base_x,address->in(AddPNode::Offset)));
1412 x->set_req(Address, adr_x);
1413 }
1414 }
1415 // Check for a 'win' on some paths
1416 const Type *t = x->Value(igvn);
1417
1418 bool singleton = t->singleton();
1419
1420 // See comments in PhaseIdealLoop::split_thru_phi().
1421 if (singleton && t == Type::TOP) {
1422 singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
1423 }
1424
1425 if (singleton) {
1426 x = igvn->makecon(t);
1427 } else {
1428 // We now call Identity to try to simplify the cloned node.
1429 // Note that some Identity methods call phase->type(this).
1430 // Make sure that the type array is big enough for
1431 // our new node, even though we may throw the node away.
1880 return _type;
1881 }
1882
1883 //------------------------------match_edge-------------------------------------
1884 // Do we Match on this edge index or not? Match only the address.
1885 uint LoadNode::match_edge(uint idx) const {
1886 return idx == MemNode::Address;
1887 }
1888
1889 //--------------------------LoadBNode::Ideal--------------------------------------
1890 //
1891 // If the previous store is to the same address as this load,
1892 // and the value stored was larger than a byte, replace this load
1893 // with the value stored truncated to a byte. If no truncation is
1894 // needed, the replacement is done in LoadNode::Identity().
1895 //
1896 Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1897 Node* mem = in(MemNode::Memory);
1898 Node* value = can_see_stored_value(mem,phase);
1899 if( value && !phase->type(value)->higher_equal( _type ) ) {
1900 Node *result = phase->transform( new LShiftINode(value, phase->intcon(24)) );
1901 return new RShiftINode(result, phase->intcon(24));
1902 }
1903 // Identity call will handle the case where truncation is not needed.
1904 return LoadNode::Ideal(phase, can_reshape);
1905 }
1906
1907 const Type* LoadBNode::Value(PhaseTransform *phase) const {
1908 Node* mem = in(MemNode::Memory);
1909 Node* value = can_see_stored_value(mem,phase);
1910 if (value != NULL && value->is_Con() &&
1911 !value->bottom_type()->higher_equal(_type)) {
1912 // If the input to the store does not fit with the load's result type,
1913 // it must be truncated. We can't delay until Ideal call since
1914 // a singleton Value is needed for split_thru_phi optimization.
1915 int con = value->get_int();
1916 return TypeInt::make((con << 24) >> 24);
1917 }
1918 return LoadNode::Value(phase);
1919 }
1920
1921 //--------------------------LoadUBNode::Ideal-------------------------------------
1922 //
1923 // If the previous store is to the same address as this load,
1924 // and the value stored was larger than a byte, replace this load
1925 // with the value stored truncated to a byte. If no truncation is
1926 // needed, the replacement is done in LoadNode::Identity().
1927 //
1928 Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
1929 Node* mem = in(MemNode::Memory);
1930 Node* value = can_see_stored_value(mem, phase);
1931 if (value && !phase->type(value)->higher_equal(_type))
1932 return new AndINode(value, phase->intcon(0xFF));
1933 // Identity call will handle the case where truncation is not needed.
1934 return LoadNode::Ideal(phase, can_reshape);
1935 }
1936
1937 const Type* LoadUBNode::Value(PhaseTransform *phase) const {
1938 Node* mem = in(MemNode::Memory);
1939 Node* value = can_see_stored_value(mem,phase);
1940 if (value != NULL && value->is_Con() &&
1941 !value->bottom_type()->higher_equal(_type)) {
1942 // If the input to the store does not fit with the load's result type,
1943 // it must be truncated. We can't delay until Ideal call since
1944 // a singleton Value is needed for split_thru_phi optimization.
1945 int con = value->get_int();
1946 return TypeInt::make(con & 0xFF);
1947 }
1948 return LoadNode::Value(phase);
1949 }
1950
1951 //--------------------------LoadUSNode::Ideal-------------------------------------
1952 //
1953 // If the previous store is to the same address as this load,
1954 // and the value stored was larger than a char, replace this load
1955 // with the value stored truncated to a char. If no truncation is
1956 // needed, the replacement is done in LoadNode::Identity().
1957 //
1958 Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1959 Node* mem = in(MemNode::Memory);
1960 Node* value = can_see_stored_value(mem,phase);
1961 if( value && !phase->type(value)->higher_equal( _type ) )
1962 return new AndINode(value,phase->intcon(0xFFFF));
1963 // Identity call will handle the case where truncation is not needed.
1964 return LoadNode::Ideal(phase, can_reshape);
1965 }
1966
1967 const Type* LoadUSNode::Value(PhaseTransform *phase) const {
1968 Node* mem = in(MemNode::Memory);
1969 Node* value = can_see_stored_value(mem,phase);
1970 if (value != NULL && value->is_Con() &&
1971 !value->bottom_type()->higher_equal(_type)) {
1972 // If the input to the store does not fit with the load's result type,
1973 // it must be truncated. We can't delay until Ideal call since
1974 // a singleton Value is needed for split_thru_phi optimization.
1975 int con = value->get_int();
1976 return TypeInt::make(con & 0xFFFF);
1977 }
1978 return LoadNode::Value(phase);
1979 }
1980
1981 //--------------------------LoadSNode::Ideal--------------------------------------
1982 //
1983 // If the previous store is to the same address as this load,
1984 // and the value stored was larger than a short, replace this load
1985 // with the value stored truncated to a short. If no truncation is
1986 // needed, the replacement is done in LoadNode::Identity().
1987 //
1988 Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1989 Node* mem = in(MemNode::Memory);
1990 Node* value = can_see_stored_value(mem,phase);
1991 if( value && !phase->type(value)->higher_equal( _type ) ) {
1992 Node *result = phase->transform( new LShiftINode(value, phase->intcon(16)) );
1993 return new RShiftINode(result, phase->intcon(16));
1994 }
1995 // Identity call will handle the case where truncation is not needed.
1996 return LoadNode::Ideal(phase, can_reshape);
1997 }
1998
1999 const Type* LoadSNode::Value(PhaseTransform *phase) const {
2000 Node* mem = in(MemNode::Memory);
2001 Node* value = can_see_stored_value(mem,phase);
2002 if (value != NULL && value->is_Con() &&
2003 !value->bottom_type()->higher_equal(_type)) {
2004 // If the input to the store does not fit with the load's result type,
2005 // it must be truncated. We can't delay until Ideal call since
2006 // a singleton Value is needed for split_thru_phi optimization.
2007 int con = value->get_int();
2008 return TypeInt::make((con << 16) >> 16);
2009 }
2010 return LoadNode::Value(phase);
2011 }
2012
2013 //=============================================================================
2014 //----------------------------LoadKlassNode::make------------------------------
2015 // Polymorphic factory method:
2016 Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
2017 Compile* C = gvn.C;
2018 Node *ctl = NULL;
2019 // sanity check the alias category against the created node type
2020 const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
2021 assert(adr_type != NULL, "expecting TypeKlassPtr");
2022 #ifdef _LP64
2023 if (adr_type->is_ptr_to_narrowklass()) {
2024 assert(UseCompressedClassPointers, "no compressed klasses");
2025 Node* load_klass = gvn.transform(new LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
2026 return new DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
2027 }
2028 #endif
2029 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
2030 return new LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
2031 }
2032
2033 //------------------------------Value------------------------------------------
2034 const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
2035 return klass_value_common(phase);
2036 }
2037
2038 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
2039 // Either input is TOP ==> the result is TOP
2040 const Type *t1 = phase->type( in(MemNode::Memory) );
2041 if (t1 == Type::TOP) return Type::TOP;
2042 Node *adr = in(MemNode::Address);
2043 const Type *t2 = phase->type( adr );
2044 if (t2 == Type::TOP) return Type::TOP;
2045 const TypePtr *tp = t2->is_ptr();
2046 if (TypePtr::above_centerline(tp->ptr()) ||
2047 tp->ptr() == TypePtr::Null) return Type::TOP;
2048
2049 // Return a more precise klass, if possible
2050 const TypeInstPtr *tinst = tp->isa_instptr();
2238 //------------------------------Value------------------------------------------
2239 const Type *LoadNKlassNode::Value( PhaseTransform *phase ) const {
2240 const Type *t = klass_value_common(phase);
2241 if (t == Type::TOP)
2242 return t;
2243
2244 return t->make_narrowklass();
2245 }
2246
2247 //------------------------------Identity---------------------------------------
2248 // To clean up reflective code, simplify k.java_mirror.as_klass to narrow k.
2249 // Also feed through the klass in Allocate(...klass...)._klass.
2250 Node* LoadNKlassNode::Identity( PhaseTransform *phase ) {
2251 Node *x = klass_identity_common(phase);
2252
2253 const Type *t = phase->type( x );
2254 if( t == Type::TOP ) return x;
2255 if( t->isa_narrowklass()) return x;
2256 assert (!t->isa_narrowoop(), "no narrow oop here");
2257
2258 return phase->transform(new EncodePKlassNode(x, t->make_narrowklass()));
2259 }
2260
2261 //------------------------------Value-----------------------------------------
2262 const Type *LoadRangeNode::Value( PhaseTransform *phase ) const {
2263 // Either input is TOP ==> the result is TOP
2264 const Type *t1 = phase->type( in(MemNode::Memory) );
2265 if( t1 == Type::TOP ) return Type::TOP;
2266 Node *adr = in(MemNode::Address);
2267 const Type *t2 = phase->type( adr );
2268 if( t2 == Type::TOP ) return Type::TOP;
2269 const TypePtr *tp = t2->is_ptr();
2270 if (TypePtr::above_centerline(tp->ptr())) return Type::TOP;
2271 const TypeAryPtr *tap = tp->isa_aryptr();
2272 if( !tap ) return _type;
2273 return tap->size();
2274 }
2275
2276 //-------------------------------Ideal---------------------------------------
2277 // Feed through the length in AllocateArray(...length...)._length.
2278 Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2333 return allocated_length;
2334 }
2335 }
2336 }
2337
2338 return this;
2339
2340 }
2341
2342 //=============================================================================
2343 //---------------------------StoreNode::make-----------------------------------
2344 // Polymorphic factory method:
2345 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
2346 assert((mo == unordered || mo == release), "unexpected");
2347 Compile* C = gvn.C;
2348 assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
2349 ctl != NULL, "raw memory operations should have control edge");
2350
2351 switch (bt) {
2352 case T_BOOLEAN:
2353 case T_BYTE: return new StoreBNode(ctl, mem, adr, adr_type, val, mo);
2354 case T_INT: return new StoreINode(ctl, mem, adr, adr_type, val, mo);
2355 case T_CHAR:
2356 case T_SHORT: return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
2357 case T_LONG: return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
2358 case T_FLOAT: return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
2359 case T_DOUBLE: return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
2360 case T_METADATA:
2361 case T_ADDRESS:
2362 case T_OBJECT:
2363 #ifdef _LP64
2364 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
2365 val = gvn.transform(new EncodePNode(val, val->bottom_type()->make_narrowoop()));
2366 return new StoreNNode(ctl, mem, adr, adr_type, val, mo);
2367 } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
2368 (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
2369 adr->bottom_type()->isa_rawptr())) {
2370 val = gvn.transform(new EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
2371 return new StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
2372 }
2373 #endif
2374 {
2375 return new StorePNode(ctl, mem, adr, adr_type, val, mo);
2376 }
2377 }
2378 ShouldNotReachHere();
2379 return (StoreNode*)NULL;
2380 }
2381
2382 StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
2383 bool require_atomic = true;
2384 return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
2385 }
2386
2387 StoreDNode* StoreDNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
2388 bool require_atomic = true;
2389 return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
2390 }
2391
2392
2393 //--------------------------bottom_type----------------------------------------
2394 const Type *StoreNode::bottom_type() const {
2395 return Type::MEMORY;
2396 }
2397
2398 //------------------------------hash-------------------------------------------
2399 uint StoreNode::hash() const {
2400 // unroll addition of interesting fields
2401 //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn);
2402
2403 // Since they are not commoned, do not hash them:
2404 return NO_HASH;
2405 }
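// (Editor's note: hashing to NO_HASH opts stores out of value numbering;
// presumably because each store defines a distinct memory state, commoning
// two stores with identical inputs would merge memory edges that the graph
// must keep separate.)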
2406
2407 //------------------------------Ideal------------------------------------------
2408 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x):
2409 // a later store to the same address lets us bypass the now-dead earlier one.
// When a store immediately follows a relevant allocation/initialization,
// try to capture it into the initialization, or hoist it above.

//------------------------------Ideal---------------------------------------
// Clearing a short array is faster with a short sequence of word stores
// than with a runtime call.
Node* ClearArrayNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const int unit = BytesPerLong;
const TypeX* t = phase->type(in(2))->isa_intptr_t();
if (!t) return NULL;
if (!t->is_con()) return NULL;
intptr_t raw_count = t->get_con();
intptr_t size = raw_count;
if (!Matcher::init_array_count_is_in_bytes) size *= unit;
// Clearing nothing uses the Identity call.
// Negative clears are possible on dead ClearArrays
2762 // (see jck test stmt114.stmt11402.val).
2763 if (size <= 0 || size % unit != 0) return NULL;
2764 intptr_t count = size / unit;
2765 // Too long to expand inline; leave it to the fast hardware/runtime clear
2766 if (size > Matcher::init_array_short_size) return NULL;
2767 Node *mem = in(1);
2768 if( phase->type(mem)==Type::TOP ) return NULL;
2769 Node *adr = in(3);
2770 const Type* at = phase->type(adr);
2771 if( at==Type::TOP ) return NULL;
2772 const TypePtr* atp = at->isa_ptr();
2773 // adjust atp to be the correct array element address type
2774 if (atp == NULL) atp = TypePtr::BOTTOM;
2775 else atp = atp->add_offset(Type::OffsetBot);
2776 // Get base for derived pointer purposes
2777 if( adr->Opcode() != Op_AddP ) Unimplemented();
2778 Node *base = adr->in(1);
2779
2780 Node *zero = phase->makecon(TypeLong::ZERO);
2781 Node *off = phase->MakeConX(BytesPerLong);
2782 mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
2783 count--;
2784 while( count-- ) {
2785 mem = phase->transform(mem);
2786 adr = phase->transform(new AddPNode(base,adr,off));
2787 mem = new StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
2788 }
2789 return mem;
2790 }
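// Expansion sketch (editor's): clearing count == 2 words at address 'adr'
// becomes a chain of zero stores, e.g.:
//   mem1 = (StoreL ctl mem  adr+0 0)
//   mem2 = (StoreL ctl mem1 adr+8 0)
// so a short clear costs a few stores instead of a ClearArray loop.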
2791
2792 //----------------------------step_through----------------------------------
2793 // Step *np through this ClearArray to its allocation's input memory edge
2794 // when the allocation is for a different instance; return false (no step)
// if the ClearArray initializes the very instance we are looking for.
2795 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
2796 Node* n = *np;
2797 assert(n->is_ClearArray(), "sanity");
2798 intptr_t offset;
2799 AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
2800 // This method is called only before Allocate nodes are expanded during
2801 // macro node expansion. Until then, ClearArray nodes are generated only
2802 // in LibraryCallKit::generate_arraycopy(), which follows allocations.
2803 assert(alloc != NULL, "should have allocation");
2804 if (alloc->_idx == instance_id) {
2805 // Cannot bypass initialization of the instance we are looking for.
2806 return false;
2807 }
2808 // Otherwise skip it.
2809 InitializeNode* init = alloc->initialization();
2810 if (init != NULL)
2811 *np = init->in(TypeFunc::Memory);
2812 else
2813 *np = alloc->in(TypeFunc::Memory);
2814 return true;
2815 }
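// Usage sketch (editor's, hypothetical caller): walking a memory chain past
// ClearArrays that belong to other instances:
//   while (mem->is_ClearArray() &&
//          ClearArrayNode::step_through(&mem, instance_id, phase)) {
//     // 'mem' now points above the unrelated ClearArray's allocation
//   }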
2816
2817 //----------------------------clear_memory-------------------------------------
2818 // Generate code to initialize object storage to zero.
2819 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2820 intptr_t start_offset,
2821 Node* end_offset,
2822 PhaseGVN* phase) {
2824 intptr_t offset = start_offset;
2825
2826 int unit = BytesPerLong;
2827 if ((offset % unit) != 0) {
2828 Node* adr = new AddPNode(dest, dest, phase->MakeConX(offset));
2829 adr = phase->transform(adr);
2830 const TypePtr* atp = TypeRawPtr::BOTTOM;
2831 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
2832 mem = phase->transform(mem);
2833 offset += BytesPerInt;
2834 }
2835 assert((offset % unit) == 0, "");
2836
2837 // Initialize the remaining stuff, if any, with a ClearArray.
2838 return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase);
2839 }
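// Worked example (editor's): with start_offset == 12 and BytesPerLong == 8,
// 12 % 8 != 0, so a single 4-byte zero store is emitted at dest+12 and the
// offset advances to 16; the aligned remainder [16, end) is then handled by
// the Node-offset overload below.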
2840
2841 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2842 Node* start_offset,
2843 Node* end_offset,
2844 PhaseGVN* phase) {
2845 if (start_offset == end_offset) {
2846 // nothing to do
2847 return mem;
2848 }
2849
2851 int unit = BytesPerLong;
2852 Node* zbase = start_offset;
2853 Node* zend = end_offset;
2854
2855 // Scale to the unit required by the CPU:
2856 if (!Matcher::init_array_count_is_in_bytes) {
2857 Node* shift = phase->intcon(exact_log2(unit));
2858 zbase = phase->transform(new URShiftXNode(zbase, shift) );
2859 zend = phase->transform(new URShiftXNode(zend, shift) );
2860 }
2861
2862 // Bulk clear double-words
2863 Node* zsize = phase->transform(new SubXNode(zend, zbase) );
2864 Node* adr = phase->transform(new AddPNode(dest, dest, start_offset) );
2865 mem = new ClearArrayNode(ctl, mem, zsize, adr);
2866 return phase->transform(mem);
2867 }
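// Note (editor's): when the matcher counts clear lengths in words rather than
// bytes, both offsets are shifted right by log2(BytesPerLong); e.g. byte
// offsets 16 and 48 become word offsets 2 and 6, giving a count of 4 words.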
2868
2869 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
2870 intptr_t start_offset,
2871 intptr_t end_offset,
2872 PhaseGVN* phase) {
2873 if (start_offset == end_offset) {
2874 // nothing to do
2875 return mem;
2876 }
2877
2879 assert((end_offset % BytesPerInt) == 0, "odd end offset");
2880 intptr_t done_offset = end_offset;
2881 if ((done_offset % BytesPerLong) != 0) {
2882 done_offset -= BytesPerInt;
2883 }
2884 if (done_offset > start_offset) {
2885 mem = clear_memory(ctl, mem, dest,
2886 start_offset, phase->MakeConX(done_offset), phase);
2887 }
2888 if (done_offset < end_offset) { // emit the final 32-bit store
2889 Node* adr = new AddPNode(dest, dest, phase->MakeConX(done_offset));
2890 adr = phase->transform(adr);
2891 const TypePtr* atp = TypeRawPtr::BOTTOM;
2892 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
2893 mem = phase->transform(mem);
2894 done_offset += BytesPerInt;
2895 }
2896 assert(done_offset == end_offset, "");
2897 return mem;
2898 }
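// Worked example (editor's): start_offset == 8, end_offset == 28. Since
// 28 % 8 != 0, done_offset is lowered to 24; [8, 24) is cleared in bulk,
// then one 4-byte zero store covers [24, 28), and done_offset == end_offset.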
2899
2900 //=============================================================================
2901 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent)
2902 : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)),
2903 _adr_type(C->get_adr_type(alias_idx))
2904 {
2905 init_class_id(Class_MemBar);
2906 Node* top = C->top();
2907 init_req(TypeFunc::I_O,top);
2908 init_req(TypeFunc::FramePtr,top);
2909 init_req(TypeFunc::ReturnAdr,top);
2910 if (precedent != NULL)
2911 init_req(TypeFunc::Parms, precedent);
2912 }
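// (Editor's note: a MemBar borrows the TypeFunc slot layout of calls, but
// only Control, Memory, and the optional Precedent at Parms are meaningful;
// the I_O, FramePtr, and ReturnAdr slots are stuffed with top.)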
2913
2914 //------------------------------cmp--------------------------------------------
2915 uint MemBarNode::hash() const { return NO_HASH; }
2916 uint MemBarNode::cmp( const Node &n ) const {
2917 return (&n == this); // Always fail except on self
2918 }
2919
2920 //------------------------------make-------------------------------------------
2921 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
2922 switch (opcode) {
2923 case Op_MemBarAcquire: return new MemBarAcquireNode(C, atp, pn);
2924 case Op_LoadFence: return new LoadFenceNode(C, atp, pn);
2925 case Op_MemBarRelease: return new MemBarReleaseNode(C, atp, pn);
2926 case Op_StoreFence: return new StoreFenceNode(C, atp, pn);
2927 case Op_MemBarAcquireLock: return new MemBarAcquireLockNode(C, atp, pn);
2928 case Op_MemBarReleaseLock: return new MemBarReleaseLockNode(C, atp, pn);
2929 case Op_MemBarVolatile: return new MemBarVolatileNode(C, atp, pn);
2930 case Op_MemBarCPUOrder: return new MemBarCPUOrderNode(C, atp, pn);
2931 case Op_Initialize: return new InitializeNode(C, atp, pn);
2932 case Op_MemBarStoreStore: return new MemBarStoreStoreNode(C, atp, pn);
2933 default: ShouldNotReachHere(); return NULL;
2934 }
2935 }
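// Editor's sketch (hypothetical, mirroring GraphKit-style use of the factory;
// 'ctl', 'mem', and 'gvn' are assumed locals):
//   MemBarNode* mb = MemBarNode::make(C, Op_MemBarRelease,
//                                     Compile::AliasIdxBot, NULL);
//   mb->init_req(TypeFunc::Control, ctl);
//   mb->init_req(TypeFunc::Memory,  mem);
//   Node* membar = gvn.transform(mb);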
2936
2937 //------------------------------Ideal------------------------------------------
2938 // Return a node which is more "ideal" than the current node. Strip out
2939 // control copies
2940 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
2941 if (remove_dead_region(phase, can_reshape)) return this;
2942 // Don't bother trying to transform a dead node
2943 if (in(0) && in(0)->is_top()) {
2944 return NULL;
2945 }
2946
2947 // Eliminate volatile MemBars for scalar replaced objects.
2948 if (can_reshape && req() == (Precedent+1)) {
2949 bool eliminate = false;
2950 int opc = Opcode();
2951 if (opc == Op_MemBarAcquire || opc == Op_MemBarVolatile) {
2952 // Volatile field loads and stores.
Node* my_mem = in(MemBarNode::Precedent);
if (my_mem != NULL && my_mem->is_Mem()) {
const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
// Check for a scalar-replaced (known-instance) object reference.
if (t_oop != NULL && t_oop->is_known_instance_field() &&
t_oop->offset() != Type::OffsetBot &&
2975 t_oop->offset() != Type::OffsetTop) {
2976 eliminate = true;
2977 }
2978 }
2979 } else if (opc == Op_MemBarRelease) {
2980 // Final field stores.
2981 Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase);
2982 if ((alloc != NULL) && alloc->is_Allocate() &&
2983 alloc->as_Allocate()->_is_non_escaping) {
2984 // The allocated object does not escape.
2985 eliminate = true;
2986 }
2987 }
2988 if (eliminate) {
2989 // Replace the MemBar's projections by its corresponding inputs.
2990 PhaseIterGVN* igvn = phase->is_IterGVN();
2991 igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
2992 igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
2993 // Must return either the original node (now dead) or a new node
2994 // (Do not return a top here, since that would break the uniqueness of top.)
2995 return new ConINode(TypeInt::ZERO);
2996 }
2997 }
2998 return NULL;
2999 }
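// (Editor's note: the returned ConI is a throwaway. The barrier's Control and
// Memory projections were already rewired to its inputs, so any new node that
// is neither 'this' nor top satisfies Ideal's contract while letting IGVN
// retire the dead barrier.)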
3000
3001 //------------------------------Value------------------------------------------
3002 const Type *MemBarNode::Value( PhaseTransform *phase ) const {
3003 if( !in(0) ) return Type::TOP;
3004 if( phase->type(in(0)) == Type::TOP )
3005 return Type::TOP;
3006 return TypeTuple::MEMBAR;
3007 }
3008
3009 //------------------------------match------------------------------------------
3010 // Construct projections for memory.
3011 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
3012 switch (proj->_con) {
3013 case TypeFunc::Control:
3014 case TypeFunc::Memory:
3015 return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
3016 }
3017 ShouldNotReachHere();
3018 return NULL;
3019 }
3020
3021 //===========================InitializeNode====================================
3022 // SUMMARY:
3023 // This node acts as a memory barrier on raw memory, after some raw stores.
3024 // The 'cooked' oop value feeds from the Initialize, not the Allocation.
3025 // The Initialize can 'capture' suitably constrained stores as raw inits.
3026 // It can coalesce related raw stores into larger units (called 'tiles').
3027 // It can avoid zeroing new storage for memory units which have raw inits.
3028 // At macro-expansion, it is marked 'complete', and does not optimize further.
3029 //
3030 // EXAMPLE:
3031 // The object 'new short[2]' occupies 16 bytes in a 32-bit machine.
3032 // ctl = incoming control; mem* = incoming memory
3033 // (Note: A star * on a memory edge denotes I/O and other standard edges.)
3034 // First allocate uninitialized memory and fill in the header:
3035 // alloc = (Allocate ctl mem* 16 #short[].klass ...)
// Locate the captured store, if any, that exactly covers the byte range
// [start, start + size_in_bytes).
Node* InitializeNode::find_captured_store(intptr_t start, int size_in_bytes,
3421 PhaseTransform* phase) {
3422 assert(stores_are_sane(phase), "");
3423 int i = captured_store_insertion_point(start, size_in_bytes, phase);
3424 if (i == 0) {
3425 return NULL; // something is dead
3426 } else if (i < 0) {
3427 return zero_memory(); // just primordial zero bits here
3428 } else {
3429 Node* st = in(i); // here is the store at this position
3430 assert(get_store_offset(st->as_Store(), phase) == start, "sanity");
3431 return st;
3432 }
3433 }
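// (Editor's note: captured_store_insertion_point's result is three-valued
// here: 0 means the node is dead, a negative value means no captured store
// covers the range (only primordial zero bits), and a positive value is the
// input index of the store that starts exactly at 'start'.)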
3434
3435 // Create, as a raw pointer, an address within my new object at 'offset'.
3436 Node* InitializeNode::make_raw_address(intptr_t offset,
3437 PhaseTransform* phase) {
3438 Node* addr = in(RawAddress);
3439 if (offset != 0) {
3440 Compile* C = phase->C;
3441 addr = phase->transform( new AddPNode(C->top(), addr,
3442 phase->MakeConX(offset)) );
3443 }
3444 return addr;
3445 }
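// For instance (editor's sketch), offset 12 produces
//   (AddP top rawoop (ConX 12))
// where the base is top because a raw address is not an oop and carries no
// derived-pointer base for GC bookkeeping.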
3446
3447 // Clone the given store, converting it into a raw store
3448 // initializing a field or element of my new object.
3449 // Caller is responsible for retiring the original store,
3450 // with subsume_node or the like.
3451 //
3452 // From the example above (at InitializeNode::InitializeNode),
3453 // here are the old stores to be captured:
3454 // store1 = (StoreC init.Control init.Memory (+ oop 12) 1)
3455 // store2 = (StoreC init.Control store1 (+ oop 14) 2)
3456 //
3457 // Here is the changed code; note the extra edges on init:
3458 // alloc = (Allocate ...)
3459 // rawoop = alloc.RawAddress
3460 // rawstore1 = (StoreC alloc.Control alloc.Memory (+ rawoop 12) 1)
3461 // rawstore2 = (StoreC alloc.Control alloc.Memory (+ rawoop 14) 2)
//=============================================================================
MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) {
init_class_id(Class_MergeMem);
// Initialize all memory slots to the empty-memory sentinel, for starters.
Node* empty_mem = make_empty_memory();
4110 for (uint i = Compile::AliasIdxTop; i < req(); i++) {
4111 init_req(i,empty_mem);
4112 }
4113 assert(empty_memory() == empty_mem, "");
4114
4115 if( new_base != NULL && new_base->is_MergeMem() ) {
4116 MergeMemNode* mdef = new_base->as_MergeMem();
4117 assert(mdef->empty_memory() == empty_mem, "consistent sentinels");
4118 for (MergeMemStream mms(this, mdef); mms.next_non_empty2(); ) {
4119 mms.set_memory(mms.memory2());
4120 }
4121 assert(base_memory() == mdef->base_memory(), "");
4122 } else {
4123 set_base_memory(new_base);
4124 }
4125 }
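// Resulting shape (editor's sketch): in(Compile::AliasIdxBot) holds the base
// (default) memory, and each wider slot i either holds the empty-memory
// sentinel (meaning "use base memory") or the memory state for alias class i.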
4126
4127 // Make a new, untransformed MergeMem with the same base as 'mem'.
4128 // If mem is itself a MergeMem, populate the result with the same edges.
4129 MergeMemNode* MergeMemNode::make(Compile* C, Node* mem) {
4130 return new MergeMemNode(mem);
4131 }
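// Usage sketch (editor's, hypothetical names): update one alias slice and
// re-register the merge with GVN:
//   MergeMemNode* mm = MergeMemNode::make(C, all_mem);
//   mm->set_memory_at(alias_idx, new_slice);
//   Node* merged = gvn.transform(mm);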
4132
4133 //------------------------------cmp--------------------------------------------
4134 uint MergeMemNode::hash() const { return NO_HASH; }
4135 uint MergeMemNode::cmp( const Node &n ) const {
4136 return (&n == this); // Always fail except on self
4137 }
4138
4139 //------------------------------Identity---------------------------------------
4140 Node* MergeMemNode::Identity(PhaseTransform *phase) {
4141 // Identity if this merge point does not record any interesting memory
4142 // disambiguations.
4143 Node* base_mem = base_memory();
4144 Node* empty_mem = empty_memory();
4145 if (base_mem != empty_mem) { // Memory path is not dead?
4146 for (uint i = Compile::AliasIdxRaw; i < req(); i++) {
4147 Node* mem = in(i);
4148 if (mem != empty_mem && mem != base_mem) {
4149 return this; // Many memory splits; no change
4150 }