src/hotspot/share/opto/graphKit.cpp

1498   if (require_atomic_access && bt == T_LONG) {
1499     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1500   } else if (require_atomic_access && bt == T_DOUBLE) {
1501     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1502   } else {
1503     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
1504   }
1505   ld = _gvn.transform(ld);
1506   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1507     // Improve graph before escape analysis and boxing elimination.
1508     record_for_igvn(ld);
1509   }
1510   return ld;
1511 }
1512 
1513 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1514                                 int adr_idx,
1515                                 MemNode::MemOrd mo,
1516                                 bool require_atomic_access,
1517                                 bool unaligned,
1518                                 bool mismatched) {
1519   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1520   const TypePtr* adr_type = NULL;
1521   debug_only(adr_type = C->get_adr_type(adr_idx));
1522   Node *mem = memory(adr_idx);
1523   Node* st;
1524   if (require_atomic_access && bt == T_LONG) {
1525     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1526   } else if (require_atomic_access && bt == T_DOUBLE) {
1527     st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1528   } else {
1529     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1530   }
1531   if (unaligned) {
1532     st->as_Store()->set_unaligned_access();
1533   }
1534   if (mismatched) {
1535     st->as_Store()->set_mismatched_access();
1536   }
1537   st = _gvn.transform(st);
1538   set_memory(st, adr_idx);
1539   // Back-to-back stores can only remove intermediate store with DU info
1540   // so push on worklist for optimizer.
1541   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1542     record_for_igvn(st);
1543 
1544   return st;
1545 }
1546 
1547 Node* GraphKit::access_store_at(Node* ctl,

1498   if (require_atomic_access && bt == T_LONG) {
1499     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1500   } else if (require_atomic_access && bt == T_DOUBLE) {
1501     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1502   } else {
1503     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
1504   }
1505   ld = _gvn.transform(ld);
1506   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1507     // Improve graph before escape analysis and boxing elimination.
1508     record_for_igvn(ld);
1509   }
1510   return ld;
1511 }
1512 
1513 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1514                                 int adr_idx,
1515                                 MemNode::MemOrd mo,
1516                                 bool require_atomic_access,
1517                                 bool unaligned,
1518                                 bool mismatched,
1519                                 int storeZ) {
1520   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1521   const TypePtr* adr_type = NULL;
1522   debug_only(adr_type = C->get_adr_type(adr_idx));
1523   Node *mem = memory(adr_idx);
1524   Node* st;
1525   if (require_atomic_access && bt == T_LONG) {
1526     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1527   } else if (require_atomic_access && bt == T_DOUBLE) {
1528     st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
1529   } else if (bt == T_BYTE && storeZ == 0) {
1530     st = new StoreZ0Node(ctl, mem, adr, adr_type, val, mo);
1531   } else if (bt == T_BYTE && storeZ == 1) {
1532     st = new StoreZ1Node(ctl, mem, adr, adr_type, val, mo);
1533   } else {
1534     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
1535   }
1536   if (unaligned) {
1537     st->as_Store()->set_unaligned_access();
1538   }
1539   if (mismatched) {
1540     st->as_Store()->set_mismatched_access();
1541   }
1542   st = _gvn.transform(st);
1543   set_memory(st, adr_idx);
1544   // Back-to-back stores can only remove intermediate store with DU info
1545   // so push on worklist for optimizer.
1546   if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1547     record_for_igvn(st);
1548 
1549   return st;
1550 }
1551 
1552 Node* GraphKit::access_store_at(Node* ctl,
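
The new storeZ parameter routes constant byte stores to the specialized StoreZ0Node/StoreZ1Node variants introduced by this change. Below is a minimal sketch of how a call site might drive that selection; the helper name store_flag_byte and its shape are assumptions for illustration, only the store_to_memory signature above comes from the patch.

// Hypothetical caller: emit a byte store of a known constant flag,
// steering the values 0 and 1 to StoreZ0Node / StoreZ1Node.
Node* GraphKit::store_flag_byte(Node* ctl, Node* adr, bool flag_value, int adr_idx) {
  Node* val   = intcon(flag_value ? 1 : 0);   // constant byte value as an int node
  int storeZ  = flag_value ? 1 : 0;           // 1 -> StoreZ1Node, 0 -> StoreZ0Node
  return store_to_memory(ctl, adr, val, T_BYTE, adr_idx,
                         MemNode::unordered,
                         false /*require_atomic_access*/,
                         false /*unaligned*/,
                         false /*mismatched*/,
                         storeZ);
}

Callers that do not want the specialized nodes would presumably pass a sentinel default for storeZ (declared in graphKit.hpp, which is not part of this hunk) so that control falls through to StoreNode::make as before.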

