
src/share/vm/opto/graphKit.cpp

  32 #include "gc/shared/collectedHeap.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/graphKit.hpp"
  38 #include "opto/idealKit.hpp"
  39 #include "opto/intrinsicnode.hpp"
  40 #include "opto/locknode.hpp"
  41 #include "opto/machnode.hpp"
  42 #include "opto/opaquenode.hpp"
  43 #include "opto/parse.hpp"
  44 #include "opto/rootnode.hpp"
  45 #include "opto/runtime.hpp"
  46 #include "opto/valuetypenode.hpp"
  47 #include "runtime/deoptimization.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 
  50 //----------------------------GraphKit-----------------------------------------
  51 // Main utility constructor.
  52 GraphKit::GraphKit(JVMState* jvms)
  53   : Phase(Phase::Parser),
  54     _env(C->env()),
  55     _gvn(*C->initial_gvn())
  56 {
  57   _exceptions = jvms->map()->next_exception();
  58   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  59   set_jvms(jvms);
  60 }
  61 
  62 // Private constructor for parser.
  63 GraphKit::GraphKit()
  64   : Phase(Phase::Parser),
  65     _env(C->env()),
  66     _gvn(*C->initial_gvn())
  67 {
  68   _exceptions = NULL;
  69   set_map(NULL);
  70   debug_only(_sp = -99);
  71   debug_only(set_bci(-99));
  72 }
  73 
  74 
  75 
  76 //---------------------------clean_stack---------------------------------------
  77 // Clear away rubbish from the stack area of the JVM state.
  78 // This destroys any arguments that may be waiting on the stack.
  79 void GraphKit::clean_stack(int from_sp) {


1366                           int adr_idx,
1367                           MemNode::MemOrd mo,
1368                           LoadNode::ControlDependency control_dependency,
1369                           bool require_atomic_access,
1370                           bool unaligned,
1371                           bool mismatched) {
1372   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1373   const TypePtr* adr_type = NULL; // debug-mode-only argument
1374   debug_only(adr_type = C->get_adr_type(adr_idx));
1375   Node* mem = memory(adr_idx);
1376   Node* ld;
1377   if (require_atomic_access && bt == T_LONG) {
1378     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1379   } else if (require_atomic_access && bt == T_DOUBLE) {
1380     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1381   } else {
1382     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
1383   }
1384   ld = _gvn.transform(ld);
1385   if (bt == T_VALUETYPE) {
1386     // Load non-flattened value type from memory. Add a null check and let the
1387     // interpreter take care of initializing the field to the default value type.
1388     Node* null_ctl = top();
1389     ld = null_check_common(ld, bt, false, &null_ctl, false);
1390     if (null_ctl != top()) {
1391       assert(!adr_type->isa_aryptr(), "value type array must be initialized");
1392       PreserveJVMState pjvms(this);
1393       set_control(null_ctl);
1394       uncommon_trap(Deoptimization::reason_null_check(false), Deoptimization::Action_maybe_recompile,
1395                     t->is_valuetypeptr()->value_type()->value_klass(), "uninitialized non-flattened value type");
1396     }
1397     ld = ValueTypeNode::make(gvn(), map()->memory(), ld);
1398   } else if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1399     // Improve graph before escape analysis and boxing elimination.
1400     record_for_igvn(ld);
1401   }
1402   return ld;
1403 }
1404 
1405 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1406                                 int adr_idx,
1407                                 MemNode::MemOrd mo,
1408                                 bool require_atomic_access,
1409                                 bool unaligned,
1410                                 bool mismatched) {
1411   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1412   const TypePtr* adr_type = NULL;
1413   debug_only(adr_type = C->get_adr_type(adr_idx));
1414   Node *mem = memory(adr_idx);
1415   Node* st;
1416   if (require_atomic_access && bt == T_LONG) {
1417     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);


1518                           Node* val,
1519                           const TypeOopPtr* val_type,
1520                           BasicType bt,
1521                           bool use_precise,
1522                           MemNode::MemOrd mo,
1523                           bool mismatched) {
1524   // Transformation of a value which could be NULL pointer (CastPP #NULL)
1525   // could be delayed during Parse (for example, in adjust_map_after_if()).
1526 // Execute transformation here to avoid barrier generation in such cases.
1527   if (_gvn.type(val) == TypePtr::NULL_PTR)
1528     val = _gvn.makecon(TypePtr::NULL_PTR);
1529 
1530   set_control(ctl);
1531   if (stopped()) return top(); // Dead path ?
1532 
1533   assert(bt == T_OBJECT || bt == T_VALUETYPE, "sanity");
1534   assert(val != NULL, "not dead path");
1535   uint adr_idx = C->get_alias_index(adr_type);
1536   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1537 
1538   if (bt == T_VALUETYPE) {
1539     // Allocate value type and store oop
1540     val = val->as_ValueType()->allocate(this);
1541   }
1542 
1543   pre_barrier(true /* do_load */,
1544               control(), obj, adr, adr_idx, val, val_type,
1545               NULL /* pre_val */,
1546               bt);
1547 
1548   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
1549   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1550   return store;
1551 }
1552 
1553 // Could be an array or object we don't know at compile time (unsafe ref.)
1554 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1555                              Node* obj,   // containing obj
1556                              Node* adr,  // actual address to store val at
1557                              const TypePtr* adr_type,
1558                              Node* val,
1559                              BasicType bt,
1560                              MemNode::MemOrd mo,


1613   assert(elembt != T_VALUETYPE, "value types are not supported by this method");
1614   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1615   if (elembt == T_NARROWOOP) {
1616     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1617   }
1618   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1619   return ld;
1620 }
1621 
1622 //-------------------------set_arguments_for_java_call-------------------------
1623 // Arguments (pre-popped from the stack) are taken from the JVMS.
1624 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1625   // Add the call arguments:
1626   const TypeTuple* domain = call->tf()->domain_sig();
1627   uint nargs = domain->cnt();
1628   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1629     Node* arg = argument(i-TypeFunc::Parms);
1630     if (ValueTypePassFieldsAsArgs) {
1631       if (arg->is_ValueType()) {
1632         ValueTypeNode* vt = arg->as_ValueType();
1633         if (domain->field_at(i)->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
1634           // We don't pass value type arguments by reference but instead
1635           // pass each field of the value type.
1636           idx += vt->pass_fields(call, idx, *this);
1637           // If a value type argument is passed as fields, attach the Method* to the call site
1638           // to be able to access the extended signature later via attached_method_before_pc().
1639           // For example, see CompiledMethod::preserve_callee_argument_oops().
1640           call->set_override_symbolic_info(true);
1641         } else {
1642           arg = arg->as_ValueType()->allocate(this);
1643           call->init_req(idx, arg);
1644           idx++;
1645         }
1646       } else {
1647         call->init_req(idx, arg);
1648         idx++;
1649       }
1650     } else {
1651       if (arg->is_ValueType()) {
1652         // Pass value type argument via oop to callee
1653         arg = arg->as_ValueType()->allocate(this);
1654       }
1655       call->init_req(i, arg);
1656     }
1657   }
1658 }
1659 
1660 //---------------------------set_edges_for_java_call---------------------------
1661 // Connect a newly created call into the current JVMS.
1662 // A return value node (if any) is returned from set_results_for_java_call.
1663 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1664 
1665   // Add the predefined inputs:
1666   call->init_req( TypeFunc::Control, control() );
1667   call->init_req( TypeFunc::I_O    , i_o() );
1668   call->init_req( TypeFunc::Memory , reset_memory() );
1669   call->init_req( TypeFunc::FramePtr, frameptr() );
1670   call->init_req( TypeFunc::ReturnAdr, top() );
1671 
1672   add_safepoint_edges(call, must_throw);
1673 
1674   Node* xcall = _gvn.transform(call);
1675 
1676   if (xcall == top()) {
1677     set_control(top());
1678     return;
1679   }
1680   assert(xcall == call, "call identity is stable");
1681 
1682   // Re-use the current map to produce the result.
1683 
1684   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1685   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1686   set_all_memory_call(xcall, separate_io_proj);
1687 
1688   //return xcall;   // no need, caller already has it
1689 }
1690 
1691 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
1692   if (stopped())  return top();  // maybe the call folded up?
1693 
1694   // Capture the return value, if any.
1695   Node* ret;
1696   if (call->method() == NULL ||
1697       call->method()->return_type()->basic_type() == T_VOID)
1698         ret = top();
1699   else {
1700     if (!call->tf()->returns_value_type_as_fields()) {
1701       ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1702     } else {
1703       // Return of multiple values (value type fields): we create a
1704   // ValueType node; each field is a projection from the call.
1705       const TypeTuple *range_sig = call->tf()->range_sig();
1706       const Type* t = range_sig->field_at(TypeFunc::Parms);
1707       assert(t->isa_valuetypeptr(), "only value types for multiple return values");
1708       ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
1709       ret = ValueTypeNode::make(_gvn, call, vk, TypeFunc::Parms+1, false);
1710     }
1711   }
1712 
1713   // Note:  Since any out-of-line call can produce an exception,
1714   // we always insert an I_O projection from the call into the result.
1715 
1716   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
1717 
1718   if (separate_io_proj) {
1719     // The caller requested separate projections be used by the fall
1720     // through and exceptional paths, so replace the projections for
1721     // the fall through path.
1722     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1723     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1724   }
1725   return ret;
1726 }
1727 
1728 //--------------------set_predefined_input_for_runtime_call--------------------
1729 // Reading and setting the memory state is way conservative here.
1730 // The real problem is that I am not doing real Type analysis on memory,
1731 // so I cannot distinguish card mark stores from other stores.  Across a GC
1732 // point the Store Barrier and the card mark memory have to agree.  I cannot
1733 // have a card mark store and its barrier split across the GC point from
1734 // either above or below.  Here I get that to happen by reading ALL of memory.
1735 // A better answer would be to separate out card marks from other memory.
1736 // For now, return the input memory state, so that it can be reused
1737 // after the call, if this call has restricted memory effects.
1738 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
1739   // Set fixed predefined input arguments
1740   Node* memory = reset_memory();
1741   call->init_req( TypeFunc::Control,   control()  );
1742   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
1743   call->init_req( TypeFunc::Memory,    memory     ); // may gc ptrs
1744   call->init_req( TypeFunc::FramePtr,  frameptr() );


3380     }
3381   }
3382 #endif //ASSERT
3383 
3384   return javaoop;
3385 }
3386 
3387 //---------------------------new_instance--------------------------------------
3388 // This routine takes a klass_node which may be constant (for a static type)
3389 // or may be non-constant (for reflective code).  It will work equally well
3390 // for either, and the graph will fold nicely if the optimizer later reduces
3391 // the type to a constant.
3392 // The optional arguments are for specialized use by intrinsics:
3393 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3394 //  - If 'return_size_val' is not null, report the total object size to the caller.
3395 //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3396 Node* GraphKit::new_instance(Node* klass_node,
3397                              Node* extra_slow_test,
3398                              Node* *return_size_val,
3399                              bool deoptimize_on_exception,
3400                              ValueTypeNode* value_node) {
3401   // Compute size in doublewords
3402   // The size is always an integral number of doublewords, represented
3403   // as a positive bytewise size stored in the klass's layout_helper.
3404   // The layout_helper also encodes (in a low bit) the need for a slow path.
3405   jint  layout_con = Klass::_lh_neutral_value;
3406   Node* layout_val = get_layout_helper(klass_node, layout_con);
3407   bool  layout_is_con = (layout_val == NULL);
3408 
3409   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
3410   // Generate the initial go-slow test.  It's either ALWAYS (return a
3411   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3412   // case) a computed value derived from the layout_helper.
3413   Node* initial_slow_test = NULL;
3414   if (layout_is_con) {
3415     assert(!StressReflectiveCode, "stress mode does not use these paths");
3416     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3417     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3418   } else {   // reflective case
3419     // This reflective path is used by Unsafe.allocateInstance.
3420     // (It may be stress-tested by specifying StressReflectiveCode.)


3676   if (stopped()) {
3677     set_control(null_ctl); // Always zero
3678     return;
3679   }
3680 
3681   // Prepare for merging control and IO
3682   RegionNode* res_ctl = new RegionNode(3);
3683   res_ctl->init_req(1, null_ctl);
3684   gvn().set_type(res_ctl, Type::CONTROL);
3685   record_for_igvn(res_ctl);
3686   Node* res_io = PhiNode::make(res_ctl, i_o(), Type::ABIO);
3687   gvn().set_type(res_io, Type::ABIO);
3688   record_for_igvn(res_io);
3689 
3690   // Initialize the array elements with the default value type in the loop below, then merge the loop exit into res_ctl/res_io.
3691   SafePointNode* loop_map = NULL;
3692   {
3693     PreserveJVMState pjvms(this);
3694     // Create default value type and store it to memory
3695     Node* oop = ValueTypeNode::make_default(gvn(), vk);
3696     oop = oop->as_ValueType()->allocate(this);
3697 
3698     length = SubI(length, intcon(1));
3699     add_predicate(nargs);
3700     RegionNode* loop = new RegionNode(3);
3701     loop->init_req(1, control());
3702     gvn().set_type(loop, Type::CONTROL);
3703     record_for_igvn(loop);
3704 
3705     Node* index = new PhiNode(loop, TypeInt::INT);
3706     index->init_req(1, intcon(0));
3707     gvn().set_type(index, TypeInt::INT);
3708     record_for_igvn(index);
3709 
3710   // Use a Phi over all of memory: the stores in the loop body can touch multiple alias slices (one per value type field), so a single-slice Phi would miss some of them.
3711     PhiNode* mem = new PhiNode(loop, Type::MEMORY, TypePtr::BOTTOM);
3712     mem->init_req(1, reset_memory());
3713     gvn().set_type(mem, Type::MEMORY);
3714     record_for_igvn(mem);
3715     set_control(loop);
3716     set_all_memory(mem);


4547   set_memory(st, TypeAryPtr::BYTES);
4548 }
4549 
4550 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4551   if (!field->is_constant()) {
4552     return NULL; // Field not marked as constant.
4553   }
4554   ciInstance* holder = NULL;
4555   if (!field->is_static()) {
4556     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4557     if (const_oop != NULL && const_oop->is_instance()) {
4558       holder = const_oop->as_instance();
4559     }
4560   }
4561   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4562                                                         /*is_unsigned_load=*/false);
4563   if (con_type != NULL) {
4564     Node* con = makecon(con_type);
4565     if (field->layout_type() == T_VALUETYPE) {
4566       // Load value type from constant oop
4567       con = ValueTypeNode::make(gvn(), map()->memory(), con);
4568     }
4569     return con;
4570   }
4571   return NULL;
4572 }
4573 
4574 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
4575 // Reify the property as a CastPP node in the ideal graph to comply with the monotonicity
4576   // assumption of CCP analysis.
4577   return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true)));
4578 }


  32 #include "gc/shared/collectedHeap.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/graphKit.hpp"
  38 #include "opto/idealKit.hpp"
  39 #include "opto/intrinsicnode.hpp"
  40 #include "opto/locknode.hpp"
  41 #include "opto/machnode.hpp"
  42 #include "opto/opaquenode.hpp"
  43 #include "opto/parse.hpp"
  44 #include "opto/rootnode.hpp"
  45 #include "opto/runtime.hpp"
  46 #include "opto/valuetypenode.hpp"
  47 #include "runtime/deoptimization.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 
  50 //----------------------------GraphKit-----------------------------------------
  51 // Main utility constructor.
  52 GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
  53   : Phase(Phase::Parser),
  54     _env(C->env()),
  55     _gvn((gvn != NULL) ? *gvn : *C->initial_gvn())
  56 {
  57   _exceptions = jvms->map()->next_exception();
  58   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  59   set_jvms(jvms);
  60 #ifdef ASSERT
  61   if (_gvn.is_IterGVN() != NULL) {
  62     // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
  63     _worklist_size = _gvn.C->for_igvn()->size();
  64   }
  65 #endif
  66 }
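
Illustrative sketch (not part of this webrev): the saved _worklist_size
presumably backs an ASSERT-only consistency check when the kit is destroyed.
Under that assumption, ~GraphKit could verify that the IGVN worklist did not
shrink; the assertion below is hypothetical:

    GraphKit::~GraphKit() {
      if (_gvn.is_IterGVN() != NULL) {
        assert(_gvn.C->for_igvn()->size() >= _worklist_size,
               "_for_igvn worklist must not shrink while a GraphKit is live");
      }
    }
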
  67 
  68 // Private constructor for parser.
  69 GraphKit::GraphKit()
  70   : Phase(Phase::Parser),
  71     _env(C->env()),
  72     _gvn(*C->initial_gvn())
  73 {
  74   _exceptions = NULL;
  75   set_map(NULL);
  76   debug_only(_sp = -99);
  77   debug_only(set_bci(-99));
  78 }
  79 
  80 
  81 
  82 //---------------------------clean_stack---------------------------------------
  83 // Clear away rubbish from the stack area of the JVM state.
  84 // This destroys any arguments that may be waiting on the stack.
  85 void GraphKit::clean_stack(int from_sp) {


1372                           int adr_idx,
1373                           MemNode::MemOrd mo,
1374                           LoadNode::ControlDependency control_dependency,
1375                           bool require_atomic_access,
1376                           bool unaligned,
1377                           bool mismatched) {
1378   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1379   const TypePtr* adr_type = NULL; // debug-mode-only argument
1380   debug_only(adr_type = C->get_adr_type(adr_idx));
1381   Node* mem = memory(adr_idx);
1382   Node* ld;
1383   if (require_atomic_access && bt == T_LONG) {
1384     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1385   } else if (require_atomic_access && bt == T_DOUBLE) {
1386     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1387   } else {
1388     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
1389   }
1390   ld = _gvn.transform(ld);
1391   if (bt == T_VALUETYPE) {
1392     // Loading a non-flattened value type from memory requires a null check.
1393     ld = ValueTypeNode::make(this, ld, true /* null check */);
1394   } else if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1395     // Improve graph before escape analysis and boxing elimination.
1396     record_for_igvn(ld);
1397   }
1398   return ld;
1399 }
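
Illustrative sketch (not part of this webrev): a typical call site for the
alias-typed make_load overload above; 'obj' and 'off' are hypothetical names
for a receiver oop and a known int-field offset:

    Node* adr = basic_plus_adr(obj, off);                // compute field address
    const TypePtr* adr_type = _gvn.type(adr)->is_ptr();  // alias class of the access
    Node* val = make_load(control(), adr, TypeInt::INT, T_INT, adr_type,
                          MemNode::unordered);           // plain, unordered int load
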
1400 
1401 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1402                                 int adr_idx,
1403                                 MemNode::MemOrd mo,
1404                                 bool require_atomic_access,
1405                                 bool unaligned,
1406                                 bool mismatched) {
1407   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1408   const TypePtr* adr_type = NULL;
1409   debug_only(adr_type = C->get_adr_type(adr_idx));
1410   Node *mem = memory(adr_idx);
1411   Node* st;
1412   if (require_atomic_access && bt == T_LONG) {
1413     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);


1514                           Node* val,
1515                           const TypeOopPtr* val_type,
1516                           BasicType bt,
1517                           bool use_precise,
1518                           MemNode::MemOrd mo,
1519                           bool mismatched) {
1520   // Transformation of a value which could be NULL pointer (CastPP #NULL)
1521   // could be delayed during Parse (for example, in adjust_map_after_if()).
1522 // Execute transformation here to avoid barrier generation in such cases.
1523   if (_gvn.type(val) == TypePtr::NULL_PTR)
1524     val = _gvn.makecon(TypePtr::NULL_PTR);
1525 
1526   set_control(ctl);
1527   if (stopped()) return top(); // Dead path ?
1528 
1529   assert(bt == T_OBJECT || bt == T_VALUETYPE, "sanity");
1530   assert(val != NULL, "not dead path");
1531   uint adr_idx = C->get_alias_index(adr_type);
1532   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1533 
1534   if (val->is_ValueType()) {
1535     // Allocate value type and get oop
1536     val = val->as_ValueType()->allocate(this)->get_oop();
1537   }
1538 
1539   pre_barrier(true /* do_load */,
1540               control(), obj, adr, adr_idx, val, val_type,
1541               NULL /* pre_val */,
1542               bt);
1543 
1544   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
1545   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1546   return store;
1547 }
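
Illustrative note (not part of this webrev): store_oop brackets every oop
store with the GC write-barrier pair, so a buffered value type is treated
like any other stored oop. Schematically:

    pre_barrier(...);                  // e.g. G1: SATB log of the previous value
    Node* st = store_to_memory(...);   // the oop store itself
    post_barrier(...);                 // e.g. G1/CMS: card mark / remembered-set update
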
1548 
1549 // Could be an array or object we don't know at compile time (unsafe ref.)
1550 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1551                              Node* obj,   // containing obj
1552                              Node* adr,  // actual address to store val at
1553                              const TypePtr* adr_type,
1554                              Node* val,
1555                              BasicType bt,
1556                              MemNode::MemOrd mo,


1609   assert(elembt != T_VALUETYPE, "value types are not supported by this method");
1610   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1611   if (elembt == T_NARROWOOP) {
1612     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1613   }
1614   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1615   return ld;
1616 }
1617 
1618 //-------------------------set_arguments_for_java_call-------------------------
1619 // Arguments (pre-popped from the stack) are taken from the JVMS.
1620 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1621   // Add the call arguments:
1622   const TypeTuple* domain = call->tf()->domain_sig();
1623   uint nargs = domain->cnt();
1624   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1625     Node* arg = argument(i-TypeFunc::Parms);
1626     if (ValueTypePassFieldsAsArgs) {
1627       if (arg->is_ValueType()) {
1628         ValueTypeNode* vt = arg->as_ValueType();
1629         if (!domain->field_at(i)->is_valuetypeptr()->is__Value()) {
1630           // We don't pass value type arguments by reference but instead
1631           // pass each field of the value type.
1632           idx += vt->pass_fields(call, idx, *this);
1633           // If a value type argument is passed as fields, attach the Method* to the call site
1634           // to be able to access the extended signature later via attached_method_before_pc().
1635           // For example, see CompiledMethod::preserve_callee_argument_oops().
1636           call->set_override_symbolic_info(true);
1637         } else {
1638           arg = arg->as_ValueType()->allocate(this)->get_oop();
1639           call->init_req(idx, arg);
1640           idx++;
1641         }
1642       } else {
1643         call->init_req(idx, arg);
1644         idx++;
1645       }
1646     } else {
1647       if (arg->is_ValueType()) {
1648         // Pass value type argument via oop to callee
1649         arg = arg->as_ValueType()->allocate(this)->get_oop();
1650       }
1651       call->init_req(i, arg);
1652     }
1653   }
1654 }
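
Illustrative note (not part of this webrev): with ValueTypePassFieldsAsArgs
enabled, a value type argument whose static type is not __Value is scalarized
by vt->pass_fields(). For a hypothetical value class with fields (int x, long y)
this fills two call inputs in place of one oop:

    call->init_req(idx + 0, x_node);        // field x, filled by pass_fields()
    call->init_req(idx + 1, y_node);        // field y
    call->set_override_symbolic_info(true); // callee needs the extended signature

With the flag off, the value is buffered on the heap and a single oop is passed
via vt->allocate(this)->get_oop(), as in the else-branches above.
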
1655 
1656 //---------------------------set_edges_for_java_call---------------------------
1657 // Connect a newly created call into the current JVMS.
1658 // A return value node (if any) is returned from set_results_for_java_call.
1659 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1660 
1661   // Add the predefined inputs:
1662   call->init_req( TypeFunc::Control, control() );
1663   call->init_req( TypeFunc::I_O    , i_o() );
1664   call->init_req( TypeFunc::Memory , reset_memory() );
1665   call->init_req( TypeFunc::FramePtr, frameptr() );
1666   call->init_req( TypeFunc::ReturnAdr, top() );
1667 
1668   add_safepoint_edges(call, must_throw);
1669 
1670   Node* xcall = _gvn.transform(call);
1671 
1672   if (xcall == top()) {
1673     set_control(top());
1674     return;
1675   }
1676   assert(xcall == call, "call identity is stable");
1677 
1678   // Re-use the current map to produce the result.
1679 
1680   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1681   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1682   set_all_memory_call(xcall, separate_io_proj);
1683 
1684   //return xcall;   // no need, caller already has it
1685 }
1686 
1687 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
1688   if (stopped())  return top();  // maybe the call folded up?
1689 
1690   // Note:  Since any out-of-line call can produce an exception,
1691   // we always insert an I_O projection from the call into the result.
1692 
1693   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
1694 
1695   if (separate_io_proj) {
1696     // The caller requested separate projections be used by the fall
1697     // through and exceptional paths, so replace the projections for
1698     // the fall through path.
1699     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1700     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1701   }
1702 
1703   // Capture the return value, if any.
1704   Node* ret;
1705   if (call->method() == NULL ||
1706       call->method()->return_type()->basic_type() == T_VOID) {
1707     ret = top();
1708   } else {
1709     if (!call->tf()->returns_value_type_as_fields()) {
1710       ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1711     } else {
1712       // Return of multiple values (value type fields): we create a
1713   // ValueType node; each field is a projection from the call.
1714       const TypeTuple* range_sig = call->tf()->range_sig();
1715       const Type* t = range_sig->field_at(TypeFunc::Parms);
1716       assert(t->isa_valuetypeptr(), "only value types for multiple return values");
1717       ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
1718       Node* ctl = control();
1719       ret = ValueTypeNode::make(_gvn, ctl, merged_memory(), call, vk, TypeFunc::Parms+1, false);
1720       set_control(ctl);
1721     }
1722   }
1723 
1724   return ret;
1725 }
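
Illustrative note (not part of this webrev): when returns_value_type_as_fields()
is true, each field of the returned value arrives as its own projection off the
call, starting at TypeFunc::Parms+1, and ValueTypeNode::make() packages them
back into one node. Schematically, for a hypothetical two-field value:

    Node* x = _gvn.transform(new ProjNode(call, TypeFunc::Parms + 1));
    Node* y = _gvn.transform(new ProjNode(call, TypeFunc::Parms + 2));
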
1726 
1727 //--------------------set_predefined_input_for_runtime_call--------------------
1728 // Reading and setting the memory state is way conservative here.
1729 // The real problem is that I am not doing real Type analysis on memory,
1730 // so I cannot distinguish card mark stores from other stores.  Across a GC
1731 // point the Store Barrier and the card mark memory have to agree.  I cannot
1732 // have a card mark store and its barrier split across the GC point from
1733 // either above or below.  Here I get that to happen by reading ALL of memory.
1734 // A better answer would be to separate out card marks from other memory.
1735 // For now, return the input memory state, so that it can be reused
1736 // after the call, if this call has restricted memory effects.
1737 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
1738   // Set fixed predefined input arguments
1739   Node* memory = reset_memory();
1740   call->init_req( TypeFunc::Control,   control()  );
1741   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
1742   call->init_req( TypeFunc::Memory,    memory     ); // may gc ptrs
1743   call->init_req( TypeFunc::FramePtr,  frameptr() );


3379     }
3380   }
3381 #endif //ASSERT
3382 
3383   return javaoop;
3384 }
3385 
3386 //---------------------------new_instance--------------------------------------
3387 // This routine takes a klass_node which may be constant (for a static type)
3388 // or may be non-constant (for reflective code).  It will work equally well
3389 // for either, and the graph will fold nicely if the optimizer later reduces
3390 // the type to a constant.
3391 // The optional arguments are for specialized use by intrinsics:
3392 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3393 //  - If 'return_size_val' is not null, report the total object size to the caller.
3394 //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3395 Node* GraphKit::new_instance(Node* klass_node,
3396                              Node* extra_slow_test,
3397                              Node* *return_size_val,
3398                              bool deoptimize_on_exception,
3399                              ValueTypeBaseNode* value_node) {
3400   // Compute size in doublewords
3401   // The size is always an integral number of doublewords, represented
3402   // as a positive bytewise size stored in the klass's layout_helper.
3403   // The layout_helper also encodes (in a low bit) the need for a slow path.
3404   jint  layout_con = Klass::_lh_neutral_value;
3405   Node* layout_val = get_layout_helper(klass_node, layout_con);
3406   bool  layout_is_con = (layout_val == NULL);
3407 
3408   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
3409   // Generate the initial go-slow test.  It's either ALWAYS (return a
3410   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3411   // case) a computed value derived from the layout_helper.
3412   Node* initial_slow_test = NULL;
3413   if (layout_is_con) {
3414     assert(!StressReflectiveCode, "stress mode does not use these paths");
3415     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3416     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3417   } else {   // reflective case
3418     // This reflective path is used by Unsafe.allocateInstance.
3419     // (It may be stress-tested by specifying StressReflectiveCode.)


3675   if (stopped()) {
3676     set_control(null_ctl); // Always zero
3677     return;
3678   }
3679 
3680   // Prepare for merging control and IO
3681   RegionNode* res_ctl = new RegionNode(3);
3682   res_ctl->init_req(1, null_ctl);
3683   gvn().set_type(res_ctl, Type::CONTROL);
3684   record_for_igvn(res_ctl);
3685   Node* res_io = PhiNode::make(res_ctl, i_o(), Type::ABIO);
3686   gvn().set_type(res_io, Type::ABIO);
3687   record_for_igvn(res_io);
3688 
3689   // Initialize the array elements with the default value type in the loop below, then merge the loop exit into res_ctl/res_io.
3690   SafePointNode* loop_map = NULL;
3691   {
3692     PreserveJVMState pjvms(this);
3693     // Create default value type and store it to memory
3694     Node* oop = ValueTypeNode::make_default(gvn(), vk);
3695     oop = oop->as_ValueType()->allocate(this)->get_oop();
3696 
3697     length = SubI(length, intcon(1));
3698     add_predicate(nargs);
3699     RegionNode* loop = new RegionNode(3);
3700     loop->init_req(1, control());
3701     gvn().set_type(loop, Type::CONTROL);
3702     record_for_igvn(loop);
3703 
3704     Node* index = new PhiNode(loop, TypeInt::INT);
3705     index->init_req(1, intcon(0));
3706     gvn().set_type(index, TypeInt::INT);
3707     record_for_igvn(index);
3708 
3709   // Use a Phi over all of memory: the stores in the loop body can touch multiple alias slices (one per value type field), so a single-slice Phi would miss some of them.
3710     PhiNode* mem = new PhiNode(loop, Type::MEMORY, TypePtr::BOTTOM);
3711     mem->init_req(1, reset_memory());
3712     gvn().set_type(mem, Type::MEMORY);
3713     record_for_igvn(mem);
3714     set_control(loop);
3715     set_all_memory(mem);


4546   set_memory(st, TypeAryPtr::BYTES);
4547 }
4548 
4549 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4550   if (!field->is_constant()) {
4551     return NULL; // Field not marked as constant.
4552   }
4553   ciInstance* holder = NULL;
4554   if (!field->is_static()) {
4555     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4556     if (const_oop != NULL && const_oop->is_instance()) {
4557       holder = const_oop->as_instance();
4558     }
4559   }
4560   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4561                                                         /*is_unsigned_load=*/false);
4562   if (con_type != NULL) {
4563     Node* con = makecon(con_type);
4564     if (field->layout_type() == T_VALUETYPE) {
4565       // Load value type from constant oop
4566       con = ValueTypeNode::make(this, con);
4567     }
4568     return con;
4569   }
4570   return NULL;
4571 }
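
Illustrative sketch (not part of this webrev): a typical call site when parsing
a load from a constant (e.g. static final) field; the surrounding code is
hypothetical:

    Node* con = make_constant_from_field(field, obj);
    if (con != NULL) {
      push_node(field->layout_type(), con);  // fold the field load to a constant
      return;                                // no memory access emitted
    }
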
4572 
4573 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
4574 // Reify the property as a CastPP node in the ideal graph to comply with the monotonicity
4575   // assumption of CCP analysis.
4576   return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true)));
4577 }