src/share/vm/opto/graphKit.cpp

1435   map()->set_memory(mergemem);
1436 }
1437 
1438 //------------------------------set_all_memory_call----------------------------
1439 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1440   Node* newmem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1441   set_all_memory(newmem);
1442 }
1443 
1444 //=============================================================================
1445 //
1446 // parser factory methods for MemNodes
1447 //
1448 // These are layered on top of the factory methods in LoadNode and StoreNode,
1449 // and integrate with the parser's memory state and _gvn engine.
1450 //
1451 
1452 // factory methods taking an alias index ("int adr_idx")
1453 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1454                           int adr_idx,
1455                           MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) {




1456   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1457   const TypePtr* adr_type = NULL; // debug-mode-only argument
1458   debug_only(adr_type = C->get_adr_type(adr_idx));
1459   Node* mem = memory(adr_idx);
1460   Node* ld;
1461   if (require_atomic_access && bt == T_LONG) {
1462     ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
1463   } else if (require_atomic_access && bt == T_DOUBLE) {
1464     ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
1465   } else {
1466     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
1467   }






1468   ld = _gvn.transform(ld);
1469   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1470     // Improve graph before escape analysis and boxing elimination.
1471     record_for_igvn(ld);
1472   }
1473   return ld;
1474 }
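A minimal usage sketch for this factory (a hypothetical caller inside GraphKit; adr and adr_type are assumed to have been built already, e.g. via basic_plus_adr() and C->get_adr_type()):

    // Volatile long field: require_atomic_access guarantees a single
    // indivisible load even on 32-bit platforms.
    int alias_idx = C->get_alias_index(adr_type);
    Node* v = make_load(control(), adr, TypeLong::LONG, T_LONG, alias_idx,
                        MemNode::acquire, LoadNode::DependsOnlyOnTest,
                        true /* require_atomic_access */);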
1475 
1476 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1477                                 int adr_idx,
1478                                 MemNode::MemOrd mo,
1479                                 bool require_atomic_access) {


1480   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1481   const TypePtr* adr_type = NULL;
1482   debug_only(adr_type = C->get_adr_type(adr_idx));
1483   Node *mem = memory(adr_idx);
1484   Node* st;
1485   if (require_atomic_access && bt == T_LONG) {
1486     st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
1487   } else if (require_atomic_access && bt == T_DOUBLE) {
1488     st = StoreDNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
1489   } else {
1490     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1491   }






1492   st = _gvn.transform(st);
1493   set_memory(st, adr_idx);
1494   // Back-to-back stores can only have the intermediate store removed
1495   // when DU (def-use) info is available, so push on the IGVN worklist.
1496   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1497     record_for_igvn(st);
1498 
1499   return st;
1500 }
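The store-side counterpart, under the same assumptions about adr, adr_type and val:

    // Plain int store; unordered is the ordering used for
    // non-volatile fields.
    int alias_idx = C->get_alias_index(adr_type);
    Node* st = store_to_memory(control(), adr, val, T_INT, alias_idx,
                               MemNode::unordered);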
1501 
1502 
1503 void GraphKit::pre_barrier(bool do_load,
1504                            Node* ctl,
1505                            Node* obj,
1506                            Node* adr,
1507                            uint  adr_idx,
1508                            Node* val,
1509                            const TypeOopPtr* val_type,
1510                            Node* pre_val,
1511                            BasicType bt) {


1571       break;
1572 
1573     case BarrierSet::ModRef:
1574       break;
1575 
1576     case BarrierSet::Other:
1577     default      :
1578       ShouldNotReachHere();
1579 
1580   }
1581 }
1582 
1583 Node* GraphKit::store_oop(Node* ctl,
1584                           Node* obj,
1585                           Node* adr,
1586                           const TypePtr* adr_type,
1587                           Node* val,
1588                           const TypeOopPtr* val_type,
1589                           BasicType bt,
1590                           bool use_precise,
1591                           MemNode::MemOrd mo) {

1592   // Transformation of a value which could be a NULL pointer (CastPP #NULL)
1593   // may be delayed during parsing (for example, in adjust_map_after_if()).
1594   // Execute the transformation here to avoid barrier generation in that case.
1595   if (_gvn.type(val) == TypePtr::NULL_PTR)
1596     val = _gvn.makecon(TypePtr::NULL_PTR);
1597 
1598   set_control(ctl);
1599   if (stopped()) return top(); // Dead path ?
1600 
1601   assert(bt == T_OBJECT, "sanity");
1602   assert(val != NULL, "not dead path");
1603   uint adr_idx = C->get_alias_index(adr_type);
1604   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1605 
1606   pre_barrier(true /* do_load */,
1607               control(), obj, adr, adr_idx, val, val_type,
1608               NULL /* pre_val */,
1609               bt);
1610 
1611   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo);
1612   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1613   return store;
1614 }
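A sketch of how a field store might invoke this wrapper (names follow the do_put_xxx parsing code this mirrors; obj, adr, adr_type, val and val_type are placeholders):

    // use_precise == false: for a known field offset, card marking may
    // use the imprecise object start instead of the exact slot address.
    Node* st = store_oop(control(), obj, adr, adr_type, val, val_type,
                         T_OBJECT, false /* use_precise */,
                         MemNode::unordered);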
1615 
1616 // Could be an array or object we don't know at compile time (unsafe ref.)
1617 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1618                              Node* obj,   // containing obj
1619                              Node* adr,  // actual address to store val at
1620                              const TypePtr* adr_type,
1621                              Node* val,
1622                              BasicType bt,
1623                              MemNode::MemOrd mo) {

1624   Compile::AliasType* at = C->alias_type(adr_type);
1625   const TypeOopPtr* val_type = NULL;
1626   if (adr_type->isa_instptr()) {
1627     if (at->field() != NULL) {
1628       // Known field.  This code is a copy of the do_put_xxx logic.
1629       ciField* field = at->field();
1630       if (!field->type()->is_loaded()) {
1631         val_type = TypeInstPtr::BOTTOM;
1632       } else {
1633         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1634       }
1635     }
1636   } else if (adr_type->isa_aryptr()) {
1637     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1638   }
1639   if (val_type == NULL) {
1640     val_type = TypeInstPtr::BOTTOM;
1641   }
1642   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
1643 }
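A hedged sketch of the unsafe-access case this variant serves, where base may be an object or an array and only the address type is known:

    // Unsafe putObject-style store: val_type is derived from adr_type
    // above, and use_precise is forced to true.
    Node* st = store_oop_to_unknown(control(), base, adr, adr_type, val,
                                    T_OBJECT, MemNode::unordered);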
1644 
1645 
1646 //-------------------------array_element_address-------------------------
1647 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1648                                       const TypeInt* sizetype, Node* ctrl) {
1649   uint shift  = exact_log2(type2aelembytes(elembt));
1650   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1651 
1652   // short-circuit a common case (saves lots of confusing waste motion)
1653   jint idx_con = find_int_con(idx, -1);
1654   if (idx_con >= 0) {
1655     intptr_t offset = header + ((intptr_t)idx_con << shift);
1656     return basic_plus_adr(ary, offset);
1657   }
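       // Worked example (header size is platform-dependent; assuming a
       // 64-bit VM with compressed oops, base_offset_in_bytes(T_INT) == 16):
       // elembt == T_INT gives shift == 2, so idx_con == 5 yields
       // offset == 16 + (5 << 2) == 36.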
1658 
1659   // must be correct type for alignment purposes
1660   Node* base  = basic_plus_adr(ary, header);
1661 #ifdef _LP64
1662   // The scaled index operand to AddP must be a clean 64-bit value.




1435   map()->set_memory(mergemem);
1436 }
1437 
1438 //------------------------------set_all_memory_call----------------------------
1439 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1440   Node* newmem = _gvn.transform( new (C) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1441   set_all_memory(newmem);
1442 }
1443 
1444 //=============================================================================
1445 //
1446 // parser factory methods for MemNodes
1447 //
1448 // These are layered on top of the factory methods in LoadNode and StoreNode,
1449 // and integrate with the parser's memory state and _gvn engine.
1450 //
1451 
1452 // factory methods taking an alias index ("int adr_idx")
1453 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1454                           int adr_idx,
1455                           MemNode::MemOrd mo,
1456                           LoadNode::ControlDependency control_dependency,
1457                           bool require_atomic_access,
1458                           bool unaligned,
1459                           bool mismatched) {
1460   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1461   const TypePtr* adr_type = NULL; // debug-mode-only argument
1462   debug_only(adr_type = C->get_adr_type(adr_idx));
1463   Node* mem = memory(adr_idx);
1464   Node* ld;
1465   if (require_atomic_access && bt == T_LONG) {
1466     ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
1467   } else if (require_atomic_access && bt == T_DOUBLE) {
1468     ld = LoadDNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo, control_dependency);
1469   } else {
1470     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
1471   }
1472   if (unaligned) {
1473     ld->as_Load()->set_unaligned_access();
1474   }
1475   if (mismatched) {
1476     ld->as_Load()->set_mismatched_access();
1477   }
1478   ld = _gvn.transform(ld);
1479   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1480     // Improve graph before escape analysis and boxing elimination.
1481     record_for_igvn(ld);
1482   }
1483   return ld;
1484 }
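With the two new flags, a mismatched unsafe access (say, reading a long from the backing store of a byte[]) would be built roughly like this; a sketch, with alias_idx and adr as before, not code taken from the changeset:

    // mismatched: the access type differs from the container's element
    // type, so type-based memory slicing must treat this load
    // conservatively.
    Node* v = make_load(control(), adr, TypeLong::LONG, T_LONG, alias_idx,
                        MemNode::unordered, LoadNode::DependsOnlyOnTest,
                        false /* require_atomic_access */,
                        false /* unaligned */,
                        true  /* mismatched */);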
1485 
1486 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1487                                 int adr_idx,
1488                                 MemNode::MemOrd mo,
1489                                 bool require_atomic_access,
1490                                 bool unaligned,
1491                                 bool mismatched) {
1492   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1493   const TypePtr* adr_type = NULL;
1494   debug_only(adr_type = C->get_adr_type(adr_idx));
1495   Node *mem = memory(adr_idx);
1496   Node* st;
1497   if (require_atomic_access && bt == T_LONG) {
1498     st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
1499   } else if (require_atomic_access && bt == T_DOUBLE) {
1500     st = StoreDNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
1501   } else {
1502     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1503   }
1504   if (unaligned) {
1505     st->as_Store()->set_unaligned_access();
1506   }
1507   if (mismatched) {
1508     st->as_Store()->set_mismatched_access();
1509   }
1510   st = _gvn.transform(st);
1511   set_memory(st, adr_idx);
1512   // Back-to-back stores can only have the intermediate store removed
1513   // when DU (def-use) info is available, so push on the IGVN worklist.
1514   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1515     record_for_igvn(st);
1516 
1517   return st;
1518 }
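And the matching store, marking an access that is both unaligned and mismatched (e.g. an Unsafe.putLong at an arbitrary byte offset into a byte[]); again a sketch under the same assumptions:

    Node* st = store_to_memory(control(), adr, val, T_LONG, alias_idx,
                               MemNode::unordered,
                               false /* require_atomic_access */,
                               true  /* unaligned */,
                               true  /* mismatched */);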
1519 
1520 
1521 void GraphKit::pre_barrier(bool do_load,
1522                            Node* ctl,
1523                            Node* obj,
1524                            Node* adr,
1525                            uint  adr_idx,
1526                            Node* val,
1527                            const TypeOopPtr* val_type,
1528                            Node* pre_val,
1529                            BasicType bt) {


1589       break;
1590 
1591     case BarrierSet::ModRef:
1592       break;
1593 
1594     case BarrierSet::Other:
1595     default      :
1596       ShouldNotReachHere();
1597 
1598   }
1599 }
1600 
1601 Node* GraphKit::store_oop(Node* ctl,
1602                           Node* obj,
1603                           Node* adr,
1604                           const TypePtr* adr_type,
1605                           Node* val,
1606                           const TypeOopPtr* val_type,
1607                           BasicType bt,
1608                           bool use_precise,
1609                           MemNode::MemOrd mo,
1610                           bool mismatched) {
1611   // Transformation of a value which could be a NULL pointer (CastPP #NULL)
1612   // may be delayed during parsing (for example, in adjust_map_after_if()).
1613   // Execute the transformation here to avoid barrier generation in that case.
1614   if (_gvn.type(val) == TypePtr::NULL_PTR)
1615     val = _gvn.makecon(TypePtr::NULL_PTR);
1616 
1617   set_control(ctl);
1618   if (stopped()) return top(); // Dead path ?
1619 
1620   assert(bt == T_OBJECT, "sanity");
1621   assert(val != NULL, "not dead path");
1622   uint adr_idx = C->get_alias_index(adr_type);
1623   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1624 
1625   pre_barrier(true /* do_load */,
1626               control(), obj, adr, adr_idx, val, val_type,
1627               NULL /* pre_val */,
1628               bt);
1629 
1630   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, false /* require_atomic_access */, false /* unaligned */, mismatched);
1631   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1632   return store;
1633 }
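store_oop() itself only threads mismatched through to store_to_memory(); the pre/post barrier sequence is unchanged. A sketch of a mismatched oop store:

    Node* st = store_oop(control(), obj, adr, adr_type, val, val_type,
                         T_OBJECT, true /* use_precise */,
                         MemNode::release, true /* mismatched */);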
1634 
1635 // Could be an array or object we don't know at compile time (unsafe ref.)
1636 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1637                              Node* obj,   // containing obj
1638                              Node* adr,  // actual address to store val at
1639                              const TypePtr* adr_type,
1640                              Node* val,
1641                              BasicType bt,
1642                              MemNode::MemOrd mo,
1643                              bool mismatched) {
1644   Compile::AliasType* at = C->alias_type(adr_type);
1645   const TypeOopPtr* val_type = NULL;
1646   if (adr_type->isa_instptr()) {
1647     if (at->field() != NULL) {
1648       // Known field.  This code is a copy of the do_put_xxx logic.
1649       ciField* field = at->field();
1650       if (!field->type()->is_loaded()) {
1651         val_type = TypeInstPtr::BOTTOM;
1652       } else {
1653         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1654       }
1655     }
1656   } else if (adr_type->isa_aryptr()) {
1657     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1658   }
1659   if (val_type == NULL) {
1660     val_type = TypeInstPtr::BOTTOM;
1661   }
1662   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
1663 }
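The unknown-target variant simply forwards the flag; a sketch mirroring the earlier example with mismatched set:

    Node* st = store_oop_to_unknown(control(), base, adr, adr_type, val,
                                    T_OBJECT, MemNode::unordered,
                                    true /* mismatched */);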
1664 
1665 
1666 //-------------------------array_element_address-------------------------
1667 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1668                                       const TypeInt* sizetype, Node* ctrl) {
1669   uint shift  = exact_log2(type2aelembytes(elembt));
1670   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1671 
1672   // short-circuit a common case (saves lots of confusing waste motion)
1673   jint idx_con = find_int_con(idx, -1);
1674   if (idx_con >= 0) {
1675     intptr_t offset = header + ((intptr_t)idx_con << shift);
1676     return basic_plus_adr(ary, offset);
1677   }
1678 
1679   // must be correct type for alignment purposes
1680   Node* base  = basic_plus_adr(ary, header);
1681 #ifdef _LP64
1682   // The scaled index operand to AddP must be a clean 64-bit value.

