src/share/vm/opto/graphKit.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Sdiff src/share/vm/opto

src/share/vm/opto/graphKit.cpp

Print this page
rev 9347 : 8140309: [REDO] failed: no mismatched stores, except on raw memory: StoreB StoreI
Summary: Mismatched stores on same slice possible with Unsafe.Put*Unaligned methods
Reviewed-by:


1440   map()->set_memory(mergemem);
1441 }
1442 
1443 //------------------------------set_all_memory_call----------------------------
// Replace the kit's entire memory state with the memory projection of 'call',
// so everything parsed after the call observes the call's memory effects.
// 'separate_io_proj' is forwarded to the ProjNode constructor (its third,
// io_use-style argument) -- see ProjNode for the exact meaning.
1444 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1445   Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1446   set_all_memory(newmem);
1447 }
1448 
1449 //=============================================================================
1450 //
1451 // parser factory methods for MemNodes
1452 //
1453 // These are layered on top of the factory methods in LoadNode and StoreNode,
1454 // and integrate with the parser's memory state and _gvn engine.
1455 //
1456 
1457 // factory methods in "int adr_idx"
// make_load: parser-level factory for a memory read.
// Fetches the current memory state of alias class 'adr_idx', builds the
// appropriate LoadNode flavor, GVN-transforms it, and returns the resulting
// value node.  When require_atomic_access is set for T_LONG/T_DOUBLE the
// atomic Load{L,D}Node::make_atomic variants are used so the 64-bit read
// is kept as a single access.
1458 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1459                           int adr_idx,
1460                           MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) {




1461   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1462   const TypePtr* adr_type = NULL; // debug-mode-only argument
1463   debug_only(adr_type = C->get_adr_type(adr_idx));
1464   Node* mem = memory(adr_idx);
1465   Node* ld;
1466   if (require_atomic_access && bt == T_LONG) {
1467     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency);
1468   } else if (require_atomic_access && bt == T_DOUBLE) {
1469     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency);
1470   } else {
1471     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
1472   }






1473   ld = _gvn.transform(ld);
// NOTE(review): '&&' binds tighter than '||', so this condition reads
// ((bt == T_OBJECT) && do_escape_analysis()) || eliminate_boxing() -- i.e.
// with boxing elimination enabled, every load is recorded, not just oop
// loads.  Presumably intended; explicit parentheses would make it clear
// and silence -Wparentheses.
1474   if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
1475     // Improve graph before escape analysis and boxing elimination.
1476     record_for_igvn(ld);
1477   }
1478   return ld;
1479 }
1480 
// store_to_memory: parser-level factory for a memory write.
// Builds the appropriate StoreNode against the current memory state of
// alias class 'adr_idx' (atomic Store{L,D}Node variants when
// require_atomic_access is set for T_LONG/T_DOUBLE), GVN-transforms it,
// and installs the transformed store as the new memory state of that
// alias class.  Returns the store node.
1481 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1482                                 int adr_idx,
1483                                 MemNode::MemOrd mo,
1484                                 bool require_atomic_access) {


1485   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1486   const TypePtr* adr_type = NULL;
1487   debug_only(adr_type = C->get_adr_type(adr_idx));
1488   Node *mem = memory(adr_idx);
1489   Node* st;
1490   if (require_atomic_access && bt == T_LONG) {
1491     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1492   } else if (require_atomic_access && bt == T_DOUBLE) {
1493     st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1494   } else {
1495     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1496   }






1497   st = _gvn.transform(st);
1498   set_memory(st, adr_idx);
1499   // Back-to-back stores can only remove intermediate store with DU info
1500   // so push on worklist for optimizer.
1501   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1502     record_for_igvn(st);
1503 
1504   return st;
1505 }
1506 
1507 
1508 void GraphKit::pre_barrier(bool do_load,
1509                            Node* ctl,
1510                            Node* obj,
1511                            Node* adr,
1512                            uint  adr_idx,
1513                            Node* val,
1514                            const TypeOopPtr* val_type,
1515                            Node* pre_val,
1516                            BasicType bt) {


1570       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1571       break;
1572 
1573     case BarrierSet::ModRef:
1574       break;
1575 
1576     default      :
1577       ShouldNotReachHere();
1578 
1579   }
1580 }
1581 
// store_oop: store an oop 'val' into 'obj' at address 'adr' (bt must be
// T_OBJECT), wrapping the store with the GC pre- and post-barriers.
// Returns the store node, or top() if the current path is dead.
// 'use_precise' is forwarded to post_barrier (card-marking precision).
1582 Node* GraphKit::store_oop(Node* ctl,
1583                           Node* obj,
1584                           Node* adr,
1585                           const TypePtr* adr_type,
1586                           Node* val,
1587                           const TypeOopPtr* val_type,
1588                           BasicType bt,
1589                           bool use_precise,
1590                           MemNode::MemOrd mo) {

1591   // Transformation of a value which could be NULL pointer (CastPP #NULL)
1592   // could be delayed during Parse (for example, in adjust_map_after_if()).
1593   // Execute transformation here to avoid barrier generation in such case.
1594   if (_gvn.type(val) == TypePtr::NULL_PTR)
1595     val = _gvn.makecon(TypePtr::NULL_PTR);
1596 
1597   set_control(ctl);
1598   if (stopped()) return top(); // Dead path ?
1599 
1600   assert(bt == T_OBJECT, "sanity");
1601   assert(val != NULL, "not dead path");
1602   uint adr_idx = C->get_alias_index(adr_type);
1603   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1604 
1605   pre_barrier(true /* do_load */,
1606               control(), obj, adr, adr_idx, val, val_type,
1607               NULL /* pre_val */,
1608               bt);
1609 
1610   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo);
1611   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1612   return store;
1613 }
1614 
1615 // Could be an array or object we don't know at compile time (unsafe ref.)
// Derives the best statically-known value type for the target slot (declared
// field type for a known instance field, element type for an array, else
// TypeInstPtr::BOTTOM) and delegates to store_oop with use_precise = true.
1616 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1617                              Node* obj,   // containing obj
1618                              Node* adr,  // actual address to store val at
1619                              const TypePtr* adr_type,
1620                              Node* val,
1621                              BasicType bt,
1622                              MemNode::MemOrd mo) {

1623   Compile::AliasType* at = C->alias_type(adr_type);
1624   const TypeOopPtr* val_type = NULL;
1625   if (adr_type->isa_instptr()) {
1626     if (at->field() != NULL) {
1627       // known field.  This code is a copy of the do_put_xxx logic.
1628       ciField* field = at->field();
1629       if (!field->type()->is_loaded()) {
1630         // Field's class not loaded yet: fall back to the most general oop type.
1631         val_type = TypeInstPtr::BOTTOM;
1632       } else {
1633         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1634       }
1635     }
1636   } else if (adr_type->isa_aryptr()) {
1637     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1638   }
1639   if (val_type == NULL) {
1640     val_type = TypeInstPtr::BOTTOM;
1641   }
1642   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
1643 }
1643 
1644 
1645 //-------------------------array_element_address-------------------------
1646 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1647                                       const TypeInt* sizetype) {
1648   uint shift  = exact_log2(type2aelembytes(elembt));
1649   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1650 
1651   // short-circuit a common case (saves lots of confusing waste motion)
1652   jint idx_con = find_int_con(idx, -1);
1653   if (idx_con >= 0) {
1654     intptr_t offset = header + ((intptr_t)idx_con << shift);
1655     return basic_plus_adr(ary, offset);
1656   }
1657 
1658   // must be correct type for alignment purposes
1659   Node* base  = basic_plus_adr(ary, header);
1660   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype);
1661   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );




1440   map()->set_memory(mergemem);
1441 }
1442 
1443 //------------------------------set_all_memory_call----------------------------
// Make the memory projection of 'call' the kit's whole memory state so that
// subsequent parsing sees the call's memory effects.  'separate_io_proj' is
// passed through to ProjNode's third constructor argument.
1444 void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
1445   Node* newmem = _gvn.transform( new ProjNode(call, TypeFunc::Memory, separate_io_proj) );
1446   set_all_memory(newmem);
1447 }
1448 
1449 //=============================================================================
1450 //
1451 // parser factory methods for MemNodes
1452 //
1453 // These are layered on top of the factory methods in LoadNode and StoreNode,
1454 // and integrate with the parser's memory state and _gvn engine.
1455 //
1456 
1457 // factory methods in "int adr_idx"
// make_load: parser-level factory for a memory read against alias class
// 'adr_idx'.  New in this change (8140309):
//   unaligned  -- marks the load as a potentially unaligned access;
//   mismatched -- marks a load whose size/type does not match the declared
//                 type of the memory slice (e.g. Unsafe.get*Unaligned), so
//                 the optimizer will not fold it with same-slice accesses.
// Both flags must be set before the _gvn.transform() call below so the
// canonicalized node carries them.
1458 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1459                           int adr_idx,
1460                           MemNode::MemOrd mo,
1461                           LoadNode::ControlDependency control_dependency,
1462                           bool require_atomic_access,
1463                           bool unaligned,
1464                           bool mismatched) {
1465   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1466   const TypePtr* adr_type = NULL; // debug-mode-only argument
1467   debug_only(adr_type = C->get_adr_type(adr_idx));
1468   Node* mem = memory(adr_idx);
1469   Node* ld;
1470   if (require_atomic_access && bt == T_LONG) {
1471     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency);
1472   } else if (require_atomic_access && bt == T_DOUBLE) {
1473     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency);
1474   } else {
1475     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
1476   }
1477   if (unaligned) {
1478     ld->as_Load()->set_unaligned_access();
1479   }
1480   if (mismatched) {
1481     ld->as_Load()->set_mismatched_access();
1482   }
1483   ld = _gvn.transform(ld);
// NOTE(review): '&&' binds tighter than '||' -- with boxing elimination on,
// every load is recorded for igvn, not only oop loads.  Presumably intended.
1484   if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
1485     // Improve graph before escape analysis and boxing elimination.
1486     record_for_igvn(ld);
1487   }
1488   return ld;
1489 }
1490 
// store_to_memory: parser-level factory for a memory write against alias
// class 'adr_idx'; installs the transformed store as the slice's new memory
// state and returns it.  New in this change (8140309):
//   unaligned  -- marks the store as a potentially unaligned access;
//   mismatched -- marks a store whose size/type does not match the declared
//                 type of the memory slice (e.g. Unsafe.put*Unaligned), so
//                 the optimizer will not treat it as a regular same-slice
//                 store.  Both flags are applied before _gvn.transform().
1491 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1492                                 int adr_idx,
1493                                 MemNode::MemOrd mo,
1494                                 bool require_atomic_access,
1495                                 bool unaligned,
1496                                 bool mismatched) {
1497   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1498   const TypePtr* adr_type = NULL;
1499   debug_only(adr_type = C->get_adr_type(adr_idx));
1500   Node *mem = memory(adr_idx);
1501   Node* st;
1502   if (require_atomic_access && bt == T_LONG) {
1503     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1504   } else if (require_atomic_access && bt == T_DOUBLE) {
1505     st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1506   } else {
1507     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1508   }
1509   if (unaligned) {
1510     st->as_Store()->set_unaligned_access();
1511   }
1512   if (mismatched) {
1513     st->as_Store()->set_mismatched_access();
1514   }
1515   st = _gvn.transform(st);
1516   set_memory(st, adr_idx);
1517   // Back-to-back stores can only remove intermediate store with DU info
1518   // so push on worklist for optimizer.
1519   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1520     record_for_igvn(st);
1521 
1522   return st;
1523 }
1524 
1525 
1526 void GraphKit::pre_barrier(bool do_load,
1527                            Node* ctl,
1528                            Node* obj,
1529                            Node* adr,
1530                            uint  adr_idx,
1531                            Node* val,
1532                            const TypeOopPtr* val_type,
1533                            Node* pre_val,
1534                            BasicType bt) {


1588       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
1589       break;
1590 
1591     case BarrierSet::ModRef:
1592       break;
1593 
1594     default      :
1595       ShouldNotReachHere();
1596 
1597   }
1598 }
1599 
// store_oop: store an oop 'val' into 'obj' at address 'adr' (bt must be
// T_OBJECT), wrapping the store with the GC pre- and post-barriers.
// Returns the store node, or top() if the current path is dead.
// 'use_precise' is forwarded to post_barrier (card-marking precision);
// 'mismatched' (new in 8140309) marks a store whose type does not match the
// declared type of the memory slice (e.g. via Unsafe) and must reach
// store_to_memory so the StoreNode is flagged accordingly.
1600 Node* GraphKit::store_oop(Node* ctl,
1601                           Node* obj,
1602                           Node* adr,
1603                           const TypePtr* adr_type,
1604                           Node* val,
1605                           const TypeOopPtr* val_type,
1606                           BasicType bt,
1607                           bool use_precise,
1608                           MemNode::MemOrd mo,
1609                           bool mismatched) {
1610   // Transformation of a value which could be NULL pointer (CastPP #NULL)
1611   // could be delayed during Parse (for example, in adjust_map_after_if()).
1612   // Execute transformation here to avoid barrier generation in such case.
1613   if (_gvn.type(val) == TypePtr::NULL_PTR)
1614     val = _gvn.makecon(TypePtr::NULL_PTR);
1615 
1616   set_control(ctl);
1617   if (stopped()) return top(); // Dead path ?
1618 
1619   assert(bt == T_OBJECT, "sanity");
1620   assert(val != NULL, "not dead path");
1621   uint adr_idx = C->get_alias_index(adr_type);
1622   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1623 
1624   pre_barrier(true /* do_load */,
1625               control(), obj, adr, adr_idx, val, val_type,
1626               NULL /* pre_val */,
1627               bt);
1628 
// BUG FIX: the original call passed 'mismatched' as the 7th positional
// argument of store_to_memory, which is 'require_atomic_access' -- so the
// mismatched flag was silently dropped (and the store never marked),
// defeating the purpose of this patch.  Pass each flag in its own slot.
1629   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo,
                                    false /* require_atomic_access */,
                                    false /* unaligned */,
                                    mismatched);
1630   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1631   return store;
1632 }
1633 
1634 // Could be an array or object we don't know at compile time (unsafe ref.)
// Derives the best statically-known value type for the target slot (declared
// field type for a known instance field, element type for an array, else
// TypeInstPtr::BOTTOM) and delegates to store_oop with use_precise = true.
// 'mismatched' (new in 8140309) is forwarded unchanged to store_oop.
1635 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1636                              Node* obj,   // containing obj
1637                              Node* adr,  // actual address to store val at
1638                              const TypePtr* adr_type,
1639                              Node* val,
1640                              BasicType bt,
1641                              MemNode::MemOrd mo,
1642                              bool mismatched) {
1643   Compile::AliasType* at = C->alias_type(adr_type);
1644   const TypeOopPtr* val_type = NULL;
1645   if (adr_type->isa_instptr()) {
1646     if (at->field() != NULL) {
1647       // known field.  This code is a copy of the do_put_xxx logic.
1648       ciField* field = at->field();
1649       if (!field->type()->is_loaded()) {
1650         // Field's class not loaded yet: fall back to the most general oop type.
1651         val_type = TypeInstPtr::BOTTOM;
1652       } else {
1653         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1654       }
1655     }
1656   } else if (adr_type->isa_aryptr()) {
1657     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1658   }
1659   if (val_type == NULL) {
1660     val_type = TypeInstPtr::BOTTOM;
1661   }
1662   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
1663 }
1663 
1664 
1665 //-------------------------array_element_address-------------------------
1666 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1667                                       const TypeInt* sizetype) {
1668   uint shift  = exact_log2(type2aelembytes(elembt));
1669   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1670 
1671   // short-circuit a common case (saves lots of confusing waste motion)
1672   jint idx_con = find_int_con(idx, -1);
1673   if (idx_con >= 0) {
1674     intptr_t offset = header + ((intptr_t)idx_con << shift);
1675     return basic_plus_adr(ary, offset);
1676   }
1677 
1678   // must be correct type for alignment purposes
1679   Node* base  = basic_plus_adr(ary, header);
1680   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype);
1681   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );


src/share/vm/opto/graphKit.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File