src/share/vm/opto/memnode.cpp

Old version:

    // If transformed to a MergeMem, get the desired slice
    // Otherwise the returned node represents memory for every slice
    mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m;
    // Update input if it is progress over what we have now
  }
  return mem;
}

//--------------------------Ideal_common---------------------------------------
// Look for degenerate control and memory inputs.  Bypass MergeMem inputs.
// Unhook non-raw memories from complete (macro-expanded) initializations.
Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
  // If our control input is a dead region, kill all below the region
  Node *ctl = in(MemNode::Control);
  if (ctl && remove_dead_region(phase, can_reshape))
    return this;
  ctl = in(MemNode::Control);
  // Don't bother trying to transform a dead node
  if( ctl && ctl->is_top() )  return NodeSentinel;

  // Ignore if memory is dead, or self-loop
  Node *mem = in(MemNode::Memory);
  if( phase->type( mem ) == Type::TOP ) return NodeSentinel; // caller will return NULL
  assert( mem != this, "dead loop in MemNode::Ideal" );

  Node *address = in(MemNode::Address);
  const Type *t_adr = phase->type( address );
  if( t_adr == Type::TOP )              return NodeSentinel; // caller will return NULL

  PhaseIterGVN *igvn = phase->is_IterGVN();
  if( can_reshape && igvn != NULL && igvn->_worklist.member(address) ) {
    // The address's base and type may change when the address is processed.
    // Delay this mem node transformation until the address is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }

  // Avoid independent memory operations
  Node* old_mem = mem;

  // The code that unhooked non-raw memories from complete (macro-expanded)
  // initializations was removed. After macro-expansion, all stores caught
  // by an Initialize node become raw stores, and there is no information
  // about which memory slices they modify, so it is unsafe to move any memory
  // operation above these stores. In most cases, hooked non-raw memories
  // had already been unhooked using information from detect_ptr_independence()
  // and find_previous_store().

  if (mem->is_MergeMem()) {
    MergeMemNode* mmem = mem->as_MergeMem();
    const TypePtr *tp = t_adr->is_ptr();

    mem = step_through_mergemem(phase, mmem, tp, adr_type(), tty);
  }

  if (mem != old_mem) {
    set_req(MemNode::Memory, mem);


// If the load is from Field memory and the pointer is non-null, we can
// zero out the control input.
// If the offset is constant and the base is an object allocation,
// try to hook me up to the exact initializing store.
Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p)  return (p == NodeSentinel) ? NULL : p;

  Node* ctrl    = in(MemNode::Control);
  Node* address = in(MemNode::Address);

  // Skip up past a SafePoint control.  Cannot do this for Stores because
  // pointer stores & cardmarks must stay on the same side of a SafePoint.
  if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
      phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
    ctrl = ctrl->in(0);
    set_req(MemNode::Control,ctrl);
  }

  // Check for useless control edge in some common special cases
  if (in(MemNode::Control) != NULL) {
    intptr_t ignore = 0;
    Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
    if (base != NULL
        && phase->type(base)->higher_equal(TypePtr::NOTNULL)
        && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw
        && all_controls_dominate(base, phase->C->start())) {
      // A method-invariant, non-null address (constant or 'this' argument).
      set_req(MemNode::Control, NULL);
    }
  }

  if (EliminateAutoBox && can_reshape && in(Address)->is_AddP()) {
    Node* base = in(Address)->in(AddPNode::Base);
    if (base != NULL) {
      Compile::AliasType* atp = phase->C->alias_type(adr_type());
      if (is_autobox_object(atp)) {
        Node* result = eliminate_autobox(phase);
        if (result != NULL) return result;
      }
    }
  }

  Node* mem = in(MemNode::Memory);
  const TypePtr *addr_t = phase->type(address)->isa_ptr();

  if (addr_t != NULL) {
    // try to optimize our memory input
    Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, phase);
    if (opt_mem != mem) {
      set_req(MemNode::Memory, opt_mem);
      if (phase->type( opt_mem ) == Type::TOP) return NULL;
      return this;
    }
    const TypeOopPtr *t_oop = addr_t->isa_oopptr();


        && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
      // t might actually be lower than _type, if _type is a unique
      // concrete subclass of abstract class t.
      // Make sure the reference is not into the header, by comparing
      // the offset against the offset of the start of the array's data.
      // Different array types begin at slightly different offsets (12 vs. 16).
      // We choose T_BYTE as an example base type that is least restrictive
      // as to alignment, which will therefore produce the smallest
      // possible base offset.
      const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
      if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
        const Type* jt = t->join(_type);
        // In any case, do not allow the join, per se, to empty out the type.
        if (jt->empty() && !t->empty()) {
          // This can happen if an interface-typed array narrows to a class type.
          jt = _type;
        }

        if (EliminateAutoBox) {
          // The pointers in the autobox arrays are always non-null
          Node* base = in(Address)->in(AddPNode::Base);
          if (base != NULL) {
            Compile::AliasType* atp = phase->C->alias_type(base->adr_type());
            if (is_autobox_cache(atp)) {
              return jt->join(TypePtr::NOTNULL)->is_ptr();
            }
          }
        }
        return jt;
      }
    }
  } else if (tp->base() == Type::InstPtr) {
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects
            tp->is_oopptr()->klass()->is_java_lang_Object() ||
            // unsafe field access may not have a constant offset
            phase->C->has_unsafe_access(),
            "Field accesses must be precise" );
    // For oop loads, we expect the _type to be precise
  } else if (tp->base() == Type::KlassPtr) {
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects

New version:

    // If transformed to a MergeMem, get the desired slice
    // Otherwise the returned node represents memory for every slice
    mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m;
    // Update input if it is progress over what we have now
  }
  return mem;
}

//--------------------------Ideal_common---------------------------------------
// Look for degenerate control and memory inputs.  Bypass MergeMem inputs.
// Unhook non-raw memories from complete (macro-expanded) initializations.
Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
  // If our control input is a dead region, kill all below the region
  Node *ctl = in(MemNode::Control);
  if (ctl && remove_dead_region(phase, can_reshape))
    return this;
  ctl = in(MemNode::Control);
  // Don't bother trying to transform a dead node
  if( ctl && ctl->is_top() )  return NodeSentinel;

  PhaseIterGVN *igvn = phase->is_IterGVN();
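  // is_IterGVN() returns NULL during parse-time GVN; the worklist-based
  // delays below apply only during iterative GVN (can_reshape).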
  // Wait if control is on the worklist.
  if (ctl && can_reshape && igvn != NULL) {
    Node* bol = NULL;
    Node* cmp = NULL;
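    // In the ideal graph an If's condition is its in(1), a Bool node, whose
    // own in(1) is the Cmp it tests; check those for pending changes too.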
    if (ctl->in(0)->is_If()) {
      assert(ctl->is_IfTrue() || ctl->is_IfFalse(), "sanity");
      bol = ctl->in(0)->in(1);
      if (bol->is_Bool())
        cmp = ctl->in(0)->in(1)->in(1);
    }
    if (igvn->_worklist.member(ctl) ||
        (bol != NULL && igvn->_worklist.member(bol)) ||
        (cmp != NULL && igvn->_worklist.member(cmp)) ) {
      // This control path may be dead.
      // Delay this memory node transformation until the control is processed.
      phase->is_IterGVN()->_worklist.push(this);
      return NodeSentinel; // caller will return NULL
    }
  }
  // Ignore if memory is dead, or self-loop
  Node *mem = in(MemNode::Memory);
  if( phase->type( mem ) == Type::TOP ) return NodeSentinel; // caller will return NULL
  assert( mem != this, "dead loop in MemNode::Ideal" );

  Node *address = in(MemNode::Address);
  const Type *t_adr = phase->type( address );
  if( t_adr == Type::TOP )              return NodeSentinel; // caller will return NULL

  if( can_reshape && igvn != NULL &&
      (igvn->_worklist.member(address) || phase->type(address) != adr_type()) ) {
    // The address's base and type may change when the address is processed.
    // Delay this mem node transformation until the address is processed.
    phase->is_IterGVN()->_worklist.push(this);
    return NodeSentinel; // caller will return NULL
  }

#ifdef ASSERT
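  // Debug-only sanity check: if the AddP base can be NULL, the address must
  // be a raw pointer; a non-raw address formed as NULL+offset is malformed.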
  Node* base = NULL;
  if (address->is_AddP())
    base = address->in(AddPNode::Base);
  assert(base == NULL || t_adr->isa_rawptr() ||
        !phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?");
#endif

  // Avoid independent memory operations
  Node* old_mem = mem;

  // The code that unhooked non-raw memories from complete (macro-expanded)
  // initializations was removed. After macro-expansion, all stores caught
  // by an Initialize node become raw stores, and there is no information
  // about which memory slices they modify, so it is unsafe to move any memory
  // operation above these stores. In most cases, hooked non-raw memories
  // had already been unhooked using information from detect_ptr_independence()
  // and find_previous_store().

  if (mem->is_MergeMem()) {
    MergeMemNode* mmem = mem->as_MergeMem();
    const TypePtr *tp = t_adr->is_ptr();

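    // step_through_mergemem() picks out the memory slice this address type
    // actually touches, letting the node bypass unrelated slices of the merge.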
    mem = step_through_mergemem(phase, mmem, tp, adr_type(), tty);
  }

  if (mem != old_mem) {
    set_req(MemNode::Memory, mem);

// If the load is from Field memory and the pointer is non-null, we can
// zero out the control input.
// If the offset is constant and the base is an object allocation,
// try to hook me up to the exact initializing store.
Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* p = MemNode::Ideal_common(phase, can_reshape);
  if (p)  return (p == NodeSentinel) ? NULL : p;
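  // Ideal_common uses NodeSentinel to mean "made no progress, or must wait";
  // LoadNode::Ideal translates that into the NULL its own caller expects.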

  Node* ctrl    = in(MemNode::Control);
  Node* address = in(MemNode::Address);

  // Skip up past a SafePoint control.  Cannot do this for Stores because
  // pointer stores & cardmarks must stay on the same side of a SafePoint.
  if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint &&
      phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) {
    ctrl = ctrl->in(0);
    set_req(MemNode::Control,ctrl);
  }

  // Check for useless control edge in some common special cases
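  // Ideal_base_and_offset() peels an AddP chain down to its base pointer and
  // constant offset; only the base matters here, so the offset is ignored.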
  intptr_t ignore = 0;
  Node*    base   = AddPNode::Ideal_base_and_offset(address, phase, ignore);
  if (base != NULL
      && phase->type(base)->higher_equal(TypePtr::NOTNULL)
      && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
    if (in(MemNode::Control) != NULL
        && all_controls_dominate(base, phase->C->start())) {
      // A method-invariant, non-null address (constant or 'this' argument).
      set_req(MemNode::Control, NULL);
    }

    if (EliminateAutoBox && can_reshape && address->is_AddP()) {
      Node* base = address->in(AddPNode::Base);
      Compile::AliasType* atp = phase->C->alias_type(adr_type());
      if (is_autobox_object(atp)) {
        Node* result = eliminate_autobox(phase);
        if (result != NULL) return result;
      }
    }
  }

  Node* mem = in(MemNode::Memory);
  const TypePtr *addr_t = phase->type(address)->isa_ptr();

  if (addr_t != NULL) {
    // try to optimize our memory input
    Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, phase);
    if (opt_mem != mem) {
      set_req(MemNode::Memory, opt_mem);
      if (phase->type( opt_mem ) == Type::TOP) return NULL;
      return this;
    }
    const TypeOopPtr *t_oop = addr_t->isa_oopptr();


        && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
      // t might actually be lower than _type, if _type is a unique
      // concrete subclass of abstract class t.
      // Make sure the reference is not into the header, by comparing
      // the offset against the offset of the start of the array's data.
      // Different array types begin at slightly different offsets (12 vs. 16).
      // We choose T_BYTE as an example base type that is least restrictive
      // as to alignment, which will therefore produce the smallest
      // possible base offset.
      const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
      if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
        const Type* jt = t->join(_type);
        // In any case, do not allow the join, per se, to empty out the type.
        if (jt->empty() && !t->empty()) {
          // This can happen if an interface-typed array narrows to a class type.
          jt = _type;
        }

        if (EliminateAutoBox) {
          // The pointers in the autobox arrays are always non-null
          Node* base = adr->in(AddPNode::Base);
          if (base != NULL && phase->type(base)->higher_equal(TypePtr::NOTNULL)) {
            Compile::AliasType* atp = phase->C->alias_type(base->adr_type());
            if (is_autobox_cache(atp)) {
              return jt->join(TypePtr::NOTNULL)->is_ptr();
            }
          }
        }
        return jt;
      }
    }
  } else if (tp->base() == Type::InstPtr) {
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects
            tp->is_oopptr()->klass()->is_java_lang_Object() ||
            // unsafe field access may not have a constant offset
            phase->C->has_unsafe_access(),
            "Field accesses must be precise" );
    // For oop loads, we expect the _type to be precise
  } else if (tp->base() == Type::KlassPtr) {
    assert( off != Type::OffsetBot ||
            // arrays can be cast to Objects

