src/hotspot/share/opto/graphKit.cpp

  32 #include "gc/shared/collectedHeap.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/graphKit.hpp"
  38 #include "opto/idealKit.hpp"
  39 #include "opto/intrinsicnode.hpp"
  40 #include "opto/locknode.hpp"
  41 #include "opto/machnode.hpp"
  42 #include "opto/opaquenode.hpp"
  43 #include "opto/parse.hpp"
  44 #include "opto/rootnode.hpp"
  45 #include "opto/runtime.hpp"
  46 #include "opto/valuetypenode.hpp"
  47 #include "runtime/deoptimization.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 
  50 //----------------------------GraphKit-----------------------------------------
  51 // Main utility constructor.
  52 GraphKit::GraphKit(JVMState* jvms)
  53   : Phase(Phase::Parser),
  54     _env(C->env()),
  55     _gvn(*C->initial_gvn())
  56 {
  57   _exceptions = jvms->map()->next_exception();
  58   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  59   set_jvms(jvms);
  60 }
  61 
  62 // Private constructor for parser.
  63 GraphKit::GraphKit()
  64   : Phase(Phase::Parser),
  65     _env(C->env()),
  66     _gvn(*C->initial_gvn())
  67 {
  68   _exceptions = NULL;
  69   set_map(NULL);
  70   debug_only(_sp = -99);
  71   debug_only(set_bci(-99));
  72 }
  73 
  74 
  75 
  76 //---------------------------clean_stack---------------------------------------
  77 // Clear away rubbish from the stack area of the JVM state.
  78 // This destroys any arguments that may be waiting on the stack.
  79 void GraphKit::clean_stack(int from_sp) {


1366                           int adr_idx,
1367                           MemNode::MemOrd mo,
1368                           LoadNode::ControlDependency control_dependency,
1369                           bool require_atomic_access,
1370                           bool unaligned,
1371                           bool mismatched) {
1372   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1373   const TypePtr* adr_type = NULL; // debug-mode-only argument
1374   debug_only(adr_type = C->get_adr_type(adr_idx));
1375   Node* mem = memory(adr_idx);
1376   Node* ld;
1377   if (require_atomic_access && bt == T_LONG) {
1378     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1379   } else if (require_atomic_access && bt == T_DOUBLE) {
1380     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1381   } else {
1382     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
1383   }
1384   ld = _gvn.transform(ld);
1385   if (bt == T_VALUETYPE) {
1386     // Load non-flattened value type from memory. Add a null check and let the
1387     // interpreter take care of initializing the field to the default value type.
1388     Node* null_ctl = top();
1389     ld = null_check_common(ld, bt, false, &null_ctl, false);
1390     if (null_ctl != top()) {
1391       assert(!adr_type->isa_aryptr(), "value type array must be initialized");
1392       PreserveJVMState pjvms(this);
1393       set_control(null_ctl);
1394       uncommon_trap(Deoptimization::reason_null_check(false), Deoptimization::Action_maybe_recompile,
1395                     t->is_valuetypeptr()->value_type()->value_klass(), "uninitialized non-flattened value type");
1396     }
1397     ld = ValueTypeNode::make(gvn(), map()->memory(), ld);
1398   } else if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1399     // Improve graph before escape analysis and boxing elimination.
1400     record_for_igvn(ld);
1401   }
1402   return ld;
1403 }
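
The T_VALUETYPE branch above null-checks the loaded oop and routes the null
path into an uncommon trap, so the interpreter ends up initializing the field
to the default value type. A standalone sketch of that control-flow shape
(toy C++, not C2 IR; names are illustrative):

#include <cstdio>
#include <cstdlib>

struct Oop { int payload; };              // toy stand-in for the loaded oop

// Models Deoptimization::Action_maybe_recompile: abandon compiled code and
// let the interpreter handle the uninitialized field.
void uncommon_trap(const char* reason) {
  printf("deopt: %s\n", reason);
  exit(1);
}

int load_non_flattened_value(const Oop* ld) {
  if (ld == nullptr) {                    // the null_ctl path
    uncommon_trap("uninitialized non-flattened value type");
  }
  return ld->payload;                     // normal path continues
}

int main() {
  Oop v{7};
  printf("%d\n", load_non_flattened_value(&v));
  return 0;
}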
1404 
1405 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1406                                 int adr_idx,
1407                                 MemNode::MemOrd mo,
1408                                 bool require_atomic_access,
1409                                 bool unaligned,
1410                                 bool mismatched) {
1411   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1412   const TypePtr* adr_type = NULL;
1413   debug_only(adr_type = C->get_adr_type(adr_idx));
1414   Node *mem = memory(adr_idx);
1415   Node* st;
1416   if (require_atomic_access && bt == T_LONG) {
1417     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);


1518                           Node* val,
1519                           const TypeOopPtr* val_type,
1520                           BasicType bt,
1521                           bool use_precise,
1522                           MemNode::MemOrd mo,
1523                           bool mismatched) {
1524   // Transformation of a value which could be a NULL pointer (CastPP #NULL)
1525   // could be delayed during Parse (for example, in adjust_map_after_if()).
1526   // Execute transformation here to avoid barrier generation in such case.
1527   if (_gvn.type(val) == TypePtr::NULL_PTR)
1528     val = _gvn.makecon(TypePtr::NULL_PTR);
1529 
1530   set_control(ctl);
1531   if (stopped()) return top(); // Dead path ?
1532 
1533   assert(bt == T_OBJECT || bt == T_VALUETYPE, "sanity");
1534   assert(val != NULL, "not dead path");
1535   uint adr_idx = C->get_alias_index(adr_type);
1536   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1537 
1538   if (bt == T_VALUETYPE) {
1539     // Allocate value type and store oop
1540     val = val->as_ValueType()->allocate(this);
1541   }
1542 
1543   pre_barrier(true /* do_load */,
1544               control(), obj, adr, adr_idx, val, val_type,
1545               NULL /* pre_val */,
1546               bt);
1547 
1548   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
1549   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1550   return store;
1551 }
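
store_oop brackets the store with pre_barrier()/post_barrier(). For a
card-table collector, the post-barrier dirties the card covering the holder
object so the GC rescans it. A toy sketch (assumed 512-byte cards and a toy
table; not the actual HotSpot barrier code):

#include <cstdint>
#include <cstdio>

const int card_shift = 9;                        // assumption: 512-byte cards
uint8_t card_table[1 << 16];                     // toy card table

void post_barrier(const void* obj) {
  size_t card = ((uintptr_t)obj >> card_shift) & 0xFFFF;
  card_table[card] = 1;                          // mark the card dirty
}

struct Holder { const void* field; };

void store_oop_toy(Holder* obj, const void* val) {
  obj->field = val;                              // the oop store itself
  post_barrier(obj);                             // keep the remembered set current
}

int main() {
  static Holder h;
  static int x;
  store_oop_toy(&h, &x);
  printf("card dirty: %d\n", card_table[((uintptr_t)&h >> card_shift) & 0xFFFF]);
  return 0;
}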
1552 
1553 // Could be an array or object we don't know at compile time (unsafe ref.)
1554 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1555                              Node* obj,   // containing obj
1556                              Node* adr,  // actual address to store val at
1557                              const TypePtr* adr_type,
1558                              Node* val,
1559                              BasicType bt,
1560                              MemNode::MemOrd mo,


1613   assert(elembt != T_VALUETYPE, "value types are not supported by this method");
1614   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1615   if (elembt == T_NARROWOOP) {
1616     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1617   }
1618   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1619   return ld;
1620 }
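
array_element_address above boils down to base-plus-scaled-index arithmetic:
element address = array base + header offset + index * element size. A sketch
with an assumed 16-byte header (the real offset comes from arrayOopDesc and is
platform-dependent):

#include <cstdint>
#include <cstdio>

const uintptr_t header_bytes = 16;  // assumption; actual header size varies

uintptr_t array_element_address(uintptr_t base, int idx, size_t elem_size) {
  return base + header_bytes + (uintptr_t)idx * elem_size;
}

int main() {
  // element 3 of an int[] whose oop is at 0x1000
  printf("%#llx\n",
         (unsigned long long)array_element_address(0x1000, 3, sizeof(int32_t)));
  return 0;
}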
1621 
1622 //-------------------------set_arguments_for_java_call-------------------------
1623 // Arguments (pre-popped from the stack) are taken from the JVMS.
1624 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1625   // Add the call arguments:
1626   const TypeTuple* domain = call->tf()->domain_sig();
1627   uint nargs = domain->cnt();
1628   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1629     Node* arg = argument(i-TypeFunc::Parms);
1630     if (ValueTypePassFieldsAsArgs) {
1631       if (arg->is_ValueType()) {
1632         ValueTypeNode* vt = arg->as_ValueType();
1633         if (domain->field_at(i)->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
1634           // We don't pass value type arguments by reference but instead
1635           // pass each field of the value type
1636           idx += vt->pass_fields(call, idx, *this);
1637           // If a value type argument is passed as fields, attach the Method* to the call site
1638           // to be able to access the extended signature later via attached_method_before_pc().
1639           // For example, see CompiledMethod::preserve_callee_argument_oops().
1640           call->set_override_symbolic_info(true);
1641         } else {
1642           arg = arg->as_ValueType()->allocate(this);
1643           call->init_req(idx, arg);
1644           idx++;
1645         }
1646       } else {
1647         call->init_req(idx, arg);
1648         idx++;
1649       }
1650     } else {
1651       if (arg->is_ValueType()) {
1652         // Pass value type argument via oop to callee
1653         arg = arg->as_ValueType()->allocate(this);
1654       }
1655       call->init_req(i, arg);
1656     }
1657   }
1658 }
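
The ValueTypePassFieldsAsArgs branch trades one by-reference argument slot for
one slot per field, avoiding the allocation taken on the else-path. The
difference in ordinary C++ terms (toy code; Point stands in for a two-field
value type):

#include <cstdio>

struct Point { int x; int y; };                        // toy value type

// One argument slot holding a reference (the allocate-and-pass-oop path).
int sum_by_ref(const Point* p) { return p->x + p->y; }

// One slot per field (the pass_fields path); no allocation needed.
int sum_scalarized(int x, int y) { return x + y; }

int main() {
  Point p{1, 2};
  printf("%d %d\n", sum_by_ref(&p), sum_scalarized(p.x, p.y));
  return 0;
}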
1659 
1660 //---------------------------set_edges_for_java_call---------------------------
1661 // Connect a newly created call into the current JVMS.
1662 // A return value node (if any) is produced later by set_results_for_java_call.
1663 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1664 
1665   // Add the predefined inputs:
1666   call->init_req( TypeFunc::Control, control() );
1667   call->init_req( TypeFunc::I_O    , i_o() );
1668   call->init_req( TypeFunc::Memory , reset_memory() );
1669   call->init_req( TypeFunc::FramePtr, frameptr() );
1670   call->init_req( TypeFunc::ReturnAdr, top() );
1671 
1672   add_safepoint_edges(call, must_throw);
1673 
1674   Node* xcall = _gvn.transform(call);
1675 
1676   if (xcall == top()) {
1677     set_control(top());
1678     return;
1679   }
1680   assert(xcall == call, "call identity is stable");
1681 
1682   // Re-use the current map to produce the result.
1683 
1684   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1685   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1686   set_all_memory_call(xcall, separate_io_proj);
1687 
1688   //return xcall;   // no need, caller already has it
1689 }
1690 
1691 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
1692   if (stopped())  return top();  // maybe the call folded up?
1693 
1694   // Capture the return value, if any.
1695   Node* ret;
1696   if (call->method() == NULL ||
1697       call->method()->return_type()->basic_type() == T_VOID)
1698         ret = top();
1699   else {
1700     if (!call->tf()->returns_value_type_as_fields()) {
1701       ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1702     } else {
1703       // Return of multiple values (value type fields): we create a
1704       // ValueType node, each field is a projection from the call.
1705       const TypeTuple *range_sig = call->tf()->range_sig();
1706       const Type* t = range_sig->field_at(TypeFunc::Parms);
1707       assert(t->isa_valuetypeptr(), "only value types for multiple return values");
1708       ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
1709       ret = ValueTypeNode::make(_gvn, call, vk, TypeFunc::Parms+1, false);
1710     }
1711   }
1712 
1713   // Note:  Since any out-of-line call can produce an exception,
1714   // we always insert an I_O projection from the call into the result.
1715 
1716   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
1717 
1718   if (separate_io_proj) {
1719     // The caller requested separate projections be used by the fall
1720     // through and exceptional paths, so replace the projections for
1721     // the fall through path.
1722     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1723     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1724   }
1725   return ret;
1726 }
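
returns_value_type_as_fields() is the callee-side mirror of scalarized
argument passing: each field comes back as its own projection off the call and
is reassembled into a ValueTypeNode. In source-level terms (toy sketch):

#include <cstdio>
#include <tuple>

struct Point { int x; int y; };                        // toy value type

// Returning by fields: one scalar per field, which the caller reassembles --
// analogous to one ProjNode per field feeding ValueTypeNode::make above.
std::tuple<int, int> callee_by_fields() { return {3, 4}; }

int main() {
  auto [x, y] = callee_by_fields();   // reassemble the value
  Point p{x, y};
  printf("(%d, %d)\n", p.x, p.y);
  return 0;
}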
1727 
1728 //--------------------set_predefined_input_for_runtime_call--------------------
1729 // Reading and setting the memory state is way conservative here.
1730 // The real problem is that I am not doing real Type analysis on memory,
1731 // so I cannot distinguish card mark stores from other stores.  Across a GC
1732 // point the Store Barrier and the card mark memory have to agree.  I cannot
1733 // have a card mark store and its barrier split across the GC point from
1734 // either above or below.  Here I get that to happen by reading ALL of memory.
1735 // A better answer would be to separate out card marks from other memory.
1736 // For now, return the input memory state, so that it can be reused
1737 // after the call, if this call has restricted memory effects.
1738 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
1739   // Set fixed predefined input arguments
1740   Node* memory = reset_memory();
1741   call->init_req( TypeFunc::Control,   control()  );
1742   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
1743   call->init_req( TypeFunc::Memory,    memory     ); // may gc ptrs
1744   call->init_req( TypeFunc::FramePtr,  frameptr() );


3380     }
3381   }
3382 #endif //ASSERT
3383 
3384   return javaoop;
3385 }
3386 
3387 //---------------------------new_instance--------------------------------------
3388 // This routine takes a klass_node which may be constant (for a static type)
3389 // or may be non-constant (for reflective code).  It will work equally well
3390 // for either, and the graph will fold nicely if the optimizer later reduces
3391 // the type to a constant.
3392 // The optional arguments are for specialized use by intrinsics:
3393 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3394 //  - If 'return_size_val' is not null, report the total object size to the caller.
3395 //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3396 Node* GraphKit::new_instance(Node* klass_node,
3397                              Node* extra_slow_test,
3398                              Node* *return_size_val,
3399                              bool deoptimize_on_exception,
3400                              ValueTypeNode* value_node) {
3401   // Compute size in doublewords
3402   // The size is always an integral number of doublewords, represented
3403   // as a positive bytewise size stored in the klass's layout_helper.
3404   // The layout_helper also encodes (in a low bit) the need for a slow path.
3405   jint  layout_con = Klass::_lh_neutral_value;
3406   Node* layout_val = get_layout_helper(klass_node, layout_con);
3407   bool  layout_is_con = (layout_val == NULL);
3408 
3409   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
3410   // Generate the initial go-slow test.  It's either ALWAYS (return a
3411   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3412   // case) a computed value derived from the layout_helper.
3413   Node* initial_slow_test = NULL;
3414   if (layout_is_con) {
3415     assert(!StressReflectiveCode, "stress mode does not use these paths");
3416     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3417     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3418   } else {   // reflective case
3419     // This reflective path is used by Unsafe.allocateInstance.
3420     // (It may be stress-tested by specifying StressReflectiveCode.)
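
The layout_helper consulted above packs the instance size and the slow-path
flag into a single jint: size in bytes in the upper bits, low bit set when
allocation must take the slow path. A toy encoding mirroring the low-bit test
(assumed layout; the real Klass encoding also covers array klasses):

#include <cassert>
#include <cstdint>
#include <cstdio>

typedef int32_t jint;
const jint lh_slow_path_bit = 0x01;  // assumption: low bit = needs slow path

jint make_layout_helper(jint size_in_bytes, bool slow) {
  assert((size_in_bytes & lh_slow_path_bit) == 0);  // sizes are 8-byte aligned
  return size_in_bytes | (slow ? lh_slow_path_bit : 0);
}

bool layout_helper_needs_slow_path(jint lh) { return (lh & lh_slow_path_bit) != 0; }
jint layout_helper_size_in_bytes(jint lh)   { return lh & ~lh_slow_path_bit; }

int main() {
  jint lh = make_layout_helper(24, true);
  printf("size=%d slow=%d\n",
         layout_helper_size_in_bytes(lh), layout_helper_needs_slow_path(lh));
  return 0;
}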


3676   if (stopped()) {
3677     set_control(null_ctl); // Always zero
3678     return;
3679   }
3680 
3681   // Prepare for merging control and IO
3682   RegionNode* res_ctl = new RegionNode(3);
3683   res_ctl->init_req(1, null_ctl);
3684   gvn().set_type(res_ctl, Type::CONTROL);
3685   record_for_igvn(res_ctl);
3686   Node* res_io = PhiNode::make(res_ctl, i_o(), Type::ABIO);
3687   gvn().set_type(res_io, Type::ABIO);
3688   record_for_igvn(res_io);
3689 
3690   // TODO comment
3691   SafePointNode* loop_map = NULL;
3692   {
3693     PreserveJVMState pjvms(this);
3694     // Create default value type and store it to memory
3695     Node* oop = ValueTypeNode::make_default(gvn(), vk);
3696     oop = oop->as_ValueType()->allocate(this);
3697 
3698     length = SubI(length, intcon(1));
3699     add_predicate(nargs);
3700     RegionNode* loop = new RegionNode(3);
3701     loop->init_req(1, control());
3702     gvn().set_type(loop, Type::CONTROL);
3703     record_for_igvn(loop);
3704 
3705     Node* index = new PhiNode(loop, TypeInt::INT);
3706     index->init_req(1, intcon(0));
3707     gvn().set_type(index, TypeInt::INT);
3708     record_for_igvn(index);
3709 
3710     // TODO explain why we need to capture all memory
3711     PhiNode* mem = new PhiNode(loop, Type::MEMORY, TypePtr::BOTTOM);
3712     mem->init_req(1, reset_memory());
3713     gvn().set_type(mem, Type::MEMORY);
3714     record_for_igvn(mem);
3715     set_control(loop);
3716     set_all_memory(mem);
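
The Region/Phi plumbing above is the graph form of an ordinary counted loop
that stores the default value into each element: the index Phi carries the
induction variable and the memory Phi makes each iteration's stores visible to
the next. Roughly, in source-level terms (sketch; names are illustrative):

#include <cstdio>

struct Value { int v; };

int main() {
  const int length = 4;
  Value* array[length] = {};            // toy oop array
  static Value dflt{0};                 // ValueTypeNode::make_default analogue

  for (int index = 0; index < length; index++) {  // index Phi: 0, 1, 2, ...
    array[index] = &dflt;               // store the default value oop
  }
  printf("initialized %d elements\n", length);
  return 0;
}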


4547   set_memory(st, TypeAryPtr::BYTES);
4548 }
4549 
4550 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4551   if (!field->is_constant()) {
4552     return NULL; // Field not marked as constant.
4553   }
4554   ciInstance* holder = NULL;
4555   if (!field->is_static()) {
4556     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4557     if (const_oop != NULL && const_oop->is_instance()) {
4558       holder = const_oop->as_instance();
4559     }
4560   }
4561   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4562                                                         /*is_unsigned_load=*/false);
4563   if (con_type != NULL) {
4564     Node* con = makecon(con_type);
4565     if (field->layout_type() == T_VALUETYPE) {
4566       // Load value type from constant oop
4567       con = ValueTypeNode::make(gvn(), map()->memory(), con);
4568     }
4569     return con;
4570   }
4571   return NULL;
4572 }
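
The guards above fold a field load to a constant only when the field is marked
constant and, for an instance field, only when the receiver itself is a known
constant oop. A toy restatement of that decision:

#include <cstdio>
#include <optional>

struct ToyField  { bool is_constant; bool is_static; int value; };
struct ToyHolder { bool is_constant_oop; };

std::optional<int> fold_field_load(const ToyField& f, const ToyHolder* h) {
  if (!f.is_constant) return std::nullopt;          // field not foldable
  if (!f.is_static && (h == nullptr || !h->is_constant_oop))
    return std::nullopt;                            // receiver not a constant
  return f.value;                                   // fold the load
}

int main() {
  ToyField f{true, false, 42};
  ToyHolder h{true};
  if (auto c = fold_field_load(f, &h)) printf("folded to %d\n", *c);
  return 0;
}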
4573 
4574 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
4575   // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
4576   // assumption of CCP analysis.
4577   return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true)));
4578 }

src/hotspot/share/opto/graphKit.cpp

  32 #include "gc/shared/collectedHeap.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "opto/addnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/convertnode.hpp"
  37 #include "opto/graphKit.hpp"
  38 #include "opto/idealKit.hpp"
  39 #include "opto/intrinsicnode.hpp"
  40 #include "opto/locknode.hpp"
  41 #include "opto/machnode.hpp"
  42 #include "opto/opaquenode.hpp"
  43 #include "opto/parse.hpp"
  44 #include "opto/rootnode.hpp"
  45 #include "opto/runtime.hpp"
  46 #include "opto/valuetypenode.hpp"
  47 #include "runtime/deoptimization.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 
  50 //----------------------------GraphKit-----------------------------------------
  51 // Main utility constructor.
  52 GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
  53   : Phase(Phase::Parser),
  54     _env(C->env()),
  55     _gvn((gvn != NULL) ? *gvn : *C->initial_gvn())
  56 {
  57   _exceptions = jvms->map()->next_exception();
  58   if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  59   set_jvms(jvms);
  60 #ifdef ASSERT
  61   if (_gvn.is_IterGVN() != NULL) {
  62     assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
  63     // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
  64     _worklist_size = _gvn.C->for_igvn()->size();
  65   }
  66 #endif
  67 }
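
The new assertion ties this constructor to a contract: a GraphKit built on top
of an IterGVN must see transforms delayed, so half-built subgraphs are not
folded while the kit is still wiring them; nodes go onto the _for_igvn
worklist instead. A minimal standalone sketch of that contract (toy types, not
HotSpot code):

#include <cstdio>
#include <vector>

struct Node { int id; };

// Toy stand-in for an IterGVN with delay_transform() set: instead of folding
// a node eagerly, remember it on a worklist for a later pass.
struct DelayedGVN {
  std::vector<Node*> for_igvn;          // models the _for_igvn worklist
  Node* transform(Node* n) {
    for_igvn.push_back(n);              // defer; no folding mid-construction
    return n;
  }
};

int main() {
  DelayedGVN gvn;
  Node a{1}, b{2};
  gvn.transform(&a);
  gvn.transform(&b);
  printf("deferred nodes: %zu\n", gvn.for_igvn.size());  // 2
  return 0;
}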
  68 
  69 // Private constructor for parser.
  70 GraphKit::GraphKit()
  71   : Phase(Phase::Parser),
  72     _env(C->env()),
  73     _gvn(*C->initial_gvn())
  74 {
  75   _exceptions = NULL;
  76   set_map(NULL);
  77   debug_only(_sp = -99);
  78   debug_only(set_bci(-99));
  79 }
  80 
  81 
  82 
  83 //---------------------------clean_stack---------------------------------------
  84 // Clear away rubbish from the stack area of the JVM state.
  85 // This destroys any arguments that may be waiting on the stack.
  86 void GraphKit::clean_stack(int from_sp) {


1373                           int adr_idx,
1374                           MemNode::MemOrd mo,
1375                           LoadNode::ControlDependency control_dependency,
1376                           bool require_atomic_access,
1377                           bool unaligned,
1378                           bool mismatched) {
1379   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1380   const TypePtr* adr_type = NULL; // debug-mode-only argument
1381   debug_only(adr_type = C->get_adr_type(adr_idx));
1382   Node* mem = memory(adr_idx);
1383   Node* ld;
1384   if (require_atomic_access && bt == T_LONG) {
1385     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1386   } else if (require_atomic_access && bt == T_DOUBLE) {
1387     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1388   } else {
1389     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
1390   }
1391   ld = _gvn.transform(ld);
1392   if (bt == T_VALUETYPE) {
1393     // Loading a non-flattened value type from memory requires a null check.
1394     ld = ValueTypeNode::make(this, ld, true /* null check */);










1395   } else if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1396     // Improve graph before escape analysis and boxing elimination.
1397     record_for_igvn(ld);
1398   }
1399   return ld;
1400 }
1401 
1402 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1403                                 int adr_idx,
1404                                 MemNode::MemOrd mo,
1405                                 bool require_atomic_access,
1406                                 bool unaligned,
1407                                 bool mismatched) {
1408   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1409   const TypePtr* adr_type = NULL;
1410   debug_only(adr_type = C->get_adr_type(adr_idx));
1411   Node *mem = memory(adr_idx);
1412   Node* st;
1413   if (require_atomic_access && bt == T_LONG) {
1414     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);


1515                           Node* val,
1516                           const TypeOopPtr* val_type,
1517                           BasicType bt,
1518                           bool use_precise,
1519                           MemNode::MemOrd mo,
1520                           bool mismatched) {
1521   // Transformation of a value which could be a NULL pointer (CastPP #NULL)
1522   // could be delayed during Parse (for example, in adjust_map_after_if()).
1523   // Execute transformation here to avoid barrier generation in such case.
1524   if (_gvn.type(val) == TypePtr::NULL_PTR)
1525     val = _gvn.makecon(TypePtr::NULL_PTR);
1526 
1527   set_control(ctl);
1528   if (stopped()) return top(); // Dead path ?
1529 
1530   assert(bt == T_OBJECT || bt == T_VALUETYPE, "sanity");
1531   assert(val != NULL, "not dead path");
1532   uint adr_idx = C->get_alias_index(adr_type);
1533   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1534 
1535   if (val->is_ValueType()) {
1536     // Allocate value type and get oop
1537     val = val->as_ValueType()->allocate(this)->get_oop();
1538   }
1539 
1540   pre_barrier(true /* do_load */,
1541               control(), obj, adr, adr_idx, val, val_type,
1542               NULL /* pre_val */,
1543               bt);
1544 
1545   Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
1546   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1547   return store;
1548 }
1549 
1550 // Could be an array or object we don't know at compile time (unsafe ref.)
1551 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1552                              Node* obj,   // containing obj
1553                              Node* adr,  // actual address to store val at
1554                              const TypePtr* adr_type,
1555                              Node* val,
1556                              BasicType bt,
1557                              MemNode::MemOrd mo,


1610   assert(elembt != T_VALUETYPE, "value types are not supported by this method");
1611   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1612   if (elembt == T_NARROWOOP) {
1613     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1614   }
1615   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1616   return ld;
1617 }
1618 
1619 //-------------------------set_arguments_for_java_call-------------------------
1620 // Arguments (pre-popped from the stack) are taken from the JVMS.
1621 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1622   // Add the call arguments:
1623   const TypeTuple* domain = call->tf()->domain_sig();
1624   uint nargs = domain->cnt();
1625   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1626     Node* arg = argument(i-TypeFunc::Parms);
1627     if (ValueTypePassFieldsAsArgs) {
1628       if (arg->is_ValueType()) {
1629         ValueTypeNode* vt = arg->as_ValueType();
1630         if (!domain->field_at(i)->is_valuetypeptr()->is__Value()) {
1631           // We don't pass value type arguments by reference but instead
1632           // pass each field of the value type
1633           idx += vt->pass_fields(call, idx, *this);
1634           // If a value type argument is passed as fields, attach the Method* to the call site
1635           // to be able to access the extended signature later via attached_method_before_pc().
1636           // For example, see CompiledMethod::preserve_callee_argument_oops().
1637           call->set_override_symbolic_info(true);
1638         } else {
1639           arg = arg->as_ValueType()->allocate(this)->get_oop();
1640           call->init_req(idx, arg);
1641           idx++;
1642         }
1643       } else {
1644         call->init_req(idx, arg);
1645         idx++;
1646       }
1647     } else {
1648       if (arg->is_ValueType()) {
1649         // Pass value type argument via oop to callee
1650         arg = arg->as_ValueType()->allocate(this)->get_oop();
1651       }
1652       call->init_req(i, arg);
1653     }
1654   }
1655 }
1656 
1657 //---------------------------set_edges_for_java_call---------------------------
1658 // Connect a newly created call into the current JVMS.
1659 // A return value node (if any) is produced later by set_results_for_java_call.
1660 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1661 
1662   // Add the predefined inputs:
1663   call->init_req( TypeFunc::Control, control() );
1664   call->init_req( TypeFunc::I_O    , i_o() );
1665   call->init_req( TypeFunc::Memory , reset_memory() );
1666   call->init_req( TypeFunc::FramePtr, frameptr() );
1667   call->init_req( TypeFunc::ReturnAdr, top() );
1668 
1669   add_safepoint_edges(call, must_throw);
1670 
1671   Node* xcall = _gvn.transform(call);
1672 
1673   if (xcall == top()) {
1674     set_control(top());
1675     return;
1676   }
1677   assert(xcall == call, "call identity is stable");
1678 
1679   // Re-use the current map to produce the result.
1680 
1681   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1682   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1683   set_all_memory_call(xcall, separate_io_proj);
1684 
1685   //return xcall;   // no need, caller already has it
1686 }
1687 
1688 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
1689   if (stopped())  return top();  // maybe the call folded up?
1690 
1691   // Note:  Since any out-of-line call can produce an exception,
1692   // we always insert an I_O projection from the call into the result.
1693 
1694   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
1695 
1696   if (separate_io_proj) {
1697     // The caller requested separate projections be used by the fall
1698     // through and exceptional paths, so replace the projections for
1699     // the fall through path.
1700     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1701     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1702   }
1703 
1704   // Capture the return value, if any.
1705   Node* ret;
1706   if (call->method() == NULL ||
1707       call->method()->return_type()->basic_type() == T_VOID) {
1708     ret = top();
1709   } else {
1710     if (!call->tf()->returns_value_type_as_fields()) {
1711       ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1712     } else {
1713       // Return of multiple values (value type fields): we create a
1714       // ValueType node, each field is a projection from the call.
1715       const TypeTuple* range_sig = call->tf()->range_sig();
1716       const Type* t = range_sig->field_at(TypeFunc::Parms);
1717       assert(t->isa_valuetypeptr(), "only value types for multiple return values");
1718       ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
1719       Node* ctl = control();
1720       ret = ValueTypeNode::make(_gvn, ctl, merged_memory(), call, vk, TypeFunc::Parms+1, false);
1721       set_control(ctl);
1722     }
1723   }
1724 
1725   return ret;
1726 }
1727 
1728 //--------------------set_predefined_input_for_runtime_call--------------------
1729 // Reading and setting the memory state is way conservative here.
1730 // The real problem is that I am not doing real Type analysis on memory,
1731 // so I cannot distinguish card mark stores from other stores.  Across a GC
1732 // point the Store Barrier and the card mark memory have to agree.  I cannot
1733 // have a card mark store and its barrier split across the GC point from
1734 // either above or below.  Here I get that to happen by reading ALL of memory.
1735 // A better answer would be to separate out card marks from other memory.
1736 // For now, return the input memory state, so that it can be reused
1737 // after the call, if this call has restricted memory effects.
1738 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
1739   // Set fixed predefined input arguments
1740   Node* memory = reset_memory();
1741   call->init_req( TypeFunc::Control,   control()  );
1742   call->init_req( TypeFunc::I_O,       top()      ); // does no i/o
1743   call->init_req( TypeFunc::Memory,    memory     ); // may gc ptrs
1744   call->init_req( TypeFunc::FramePtr,  frameptr() );


3380     }
3381   }
3382 #endif //ASSERT
3383 
3384   return javaoop;
3385 }
3386 
3387 //---------------------------new_instance--------------------------------------
3388 // This routine takes a klass_node which may be constant (for a static type)
3389 // or may be non-constant (for reflective code).  It will work equally well
3390 // for either, and the graph will fold nicely if the optimizer later reduces
3391 // the type to a constant.
3392 // The optional arguments are for specialized use by intrinsics:
3393 //  - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3394 //  - If 'return_size_val' is not null, report the total object size to the caller.
3395 //  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3396 Node* GraphKit::new_instance(Node* klass_node,
3397                              Node* extra_slow_test,
3398                              Node* *return_size_val,
3399                              bool deoptimize_on_exception,
3400                              ValueTypeBaseNode* value_node) {
3401   // Compute size in doublewords
3402   // The size is always an integral number of doublewords, represented
3403   // as a positive bytewise size stored in the klass's layout_helper.
3404   // The layout_helper also encodes (in a low bit) the need for a slow path.
3405   jint  layout_con = Klass::_lh_neutral_value;
3406   Node* layout_val = get_layout_helper(klass_node, layout_con);
3407   bool  layout_is_con = (layout_val == NULL);
3408 
3409   if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
3410   // Generate the initial go-slow test.  It's either ALWAYS (return a
3411   // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3412   // case) a computed value derived from the layout_helper.
3413   Node* initial_slow_test = NULL;
3414   if (layout_is_con) {
3415     assert(!StressReflectiveCode, "stress mode does not use these paths");
3416     bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3417     initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3418   } else {   // reflective case
3419     // This reflective path is used by Unsafe.allocateInstance.
3420     // (It may be stress-tested by specifying StressReflectiveCode.)


3676   if (stopped()) {
3677     set_control(null_ctl); // Always zero
3678     return;
3679   }
3680 
3681   // Prepare for merging control and IO
3682   RegionNode* res_ctl = new RegionNode(3);
3683   res_ctl->init_req(1, null_ctl);
3684   gvn().set_type(res_ctl, Type::CONTROL);
3685   record_for_igvn(res_ctl);
3686   Node* res_io = PhiNode::make(res_ctl, i_o(), Type::ABIO);
3687   gvn().set_type(res_io, Type::ABIO);
3688   record_for_igvn(res_io);
3689 
3690   // TODO comment
3691   SafePointNode* loop_map = NULL;
3692   {
3693     PreserveJVMState pjvms(this);
3694     // Create default value type and store it to memory
3695     Node* oop = ValueTypeNode::make_default(gvn(), vk);
3696     oop = oop->as_ValueType()->allocate(this)->get_oop();
3697 
3698     length = SubI(length, intcon(1));
3699     add_predicate(nargs);
3700     RegionNode* loop = new RegionNode(3);
3701     loop->init_req(1, control());
3702     gvn().set_type(loop, Type::CONTROL);
3703     record_for_igvn(loop);
3704 
3705     Node* index = new PhiNode(loop, TypeInt::INT);
3706     index->init_req(1, intcon(0));
3707     gvn().set_type(index, TypeInt::INT);
3708     record_for_igvn(index);
3709 
3710     // TODO explain why we need to capture all memory
3711     PhiNode* mem = new PhiNode(loop, Type::MEMORY, TypePtr::BOTTOM);
3712     mem->init_req(1, reset_memory());
3713     gvn().set_type(mem, Type::MEMORY);
3714     record_for_igvn(mem);
3715     set_control(loop);
3716     set_all_memory(mem);


4547   set_memory(st, TypeAryPtr::BYTES);
4548 }
4549 
4550 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4551   if (!field->is_constant()) {
4552     return NULL; // Field not marked as constant.
4553   }
4554   ciInstance* holder = NULL;
4555   if (!field->is_static()) {
4556     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4557     if (const_oop != NULL && const_oop->is_instance()) {
4558       holder = const_oop->as_instance();
4559     }
4560   }
4561   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4562                                                         /*is_unsigned_load=*/false);
4563   if (con_type != NULL) {
4564     Node* con = makecon(con_type);
4565     if (field->layout_type() == T_VALUETYPE) {
4566       // Load value type from constant oop
4567       con = ValueTypeNode::make(this, con);
4568     }
4569     return con;
4570   }
4571   return NULL;
4572 }
4573 
4574 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
4575   // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
4576   // assumption of CCP analysis.
4577   return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true)));
4578 }