                          int adr_idx,
                          MemNode::MemOrd mo,
                          LoadNode::ControlDependency control_dependency,
                          bool require_atomic_access,
                          bool unaligned,
                          bool mismatched) {
  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
  const TypePtr* adr_type = NULL; // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld;
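  // Volatile long/double accesses must be performed as a single, indivisible
  // 64-bit load. On platforms without native 64-bit atomicity a plain load
  // could be split in two, so the make_atomic factories are used instead.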
  if (require_atomic_access && bt == T_LONG) {
    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
  } else if (require_atomic_access && bt == T_DOUBLE) {
    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
  } else {
    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
  }
  ld = _gvn.transform(ld);

  if (((bt == T_OBJECT || bt == T_VALUETYPE) && C->do_escape_analysis()) || C->eliminate_boxing()) {
    // Improve graph before escape analysis and boxing elimination.
    record_for_igvn(ld);
  }
  return ld;
}

Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                int adr_idx,
                                MemNode::MemOrd mo,
                                bool require_atomic_access,
                                bool unaligned,
                                bool mismatched) {
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
  const TypePtr* adr_type = NULL;
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node *mem = memory(adr_idx);
  Node* st;
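  // As in make_load() above, volatile long/double stores must not be torn
  // into two 32-bit stores, so atomic store nodes are used for them.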
  if (require_atomic_access && bt == T_LONG) {
    st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
  } else if (require_atomic_access && bt == T_DOUBLE) {

// ... (code omitted in this excerpt) ...

  Node* adr = array_element_address(ary, idx, elembt, arytype->size());
  if (elembt == T_NARROWOOP) {
    elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
  }
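  // Plain (unordered) load of the element from the array's memory slice.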
  Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
  return ld;
}

//-------------------------set_arguments_for_java_call-------------------------
// Arguments (pre-popped from the stack) are taken from the JVMS.
void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
  // Add the call arguments:
  const TypeTuple* domain = call->tf()->domain_sig();
  uint nargs = domain->cnt();
  for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
    Node* arg = argument(i-TypeFunc::Parms);
    const Type* t = domain->field_at(i);
    if (arg->is_ValueType()) {
      assert(t->is_oopptr()->can_be_value_type(), "wrong argument type");
      ValueTypeNode* vt = arg->as_ValueType();
      // TODO for now, don't scalarize value type receivers because of interface calls
      if (call->method()->get_Method()->has_scalarized_args() && t->is_valuetypeptr() && (call->method()->is_static() || i != TypeFunc::Parms)) {
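        // (For non-static calls, i == TypeFunc::Parms is the receiver; the
        // check above keeps the receiver from being scalarized.)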
        // We don't pass value type arguments by reference but instead
        // pass each field of the value type
        idx += vt->pass_fields(call, idx, *this);
        // If a value type argument is passed as fields, attach the Method* to the call site
        // to be able to access the extended signature later via attached_method_before_pc().
        // For example, see CompiledMethod::preserve_callee_argument_oops().
        call->set_override_symbolic_info(true);
        continue;
      } else {
        // Pass value type argument via oop to callee
        arg = vt->allocate(this)->get_oop();
      }
    }
    call->init_req(idx++, arg);

    SigEntry res_entry = call->method()->get_Method()->get_res_entry();
    if ((int)(idx - TypeFunc::Parms) == res_entry._offset) {
      // Skip reserved entry
      call->init_req(idx++, top());
      if (res_entry._bt == T_DOUBLE || res_entry._bt == T_LONG) {
        call->init_req(idx++, top());
      }
    }
  }
}

//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
// A return value node (if any) is returned from set_edges_for_java_call.
void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {

  // Add the predefined inputs:
  call->init_req( TypeFunc::Control, control() );
  call->init_req( TypeFunc::I_O    , i_o() );
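  // The call consumes the entire memory state (reset_memory()); memory is
  // re-established later from the call's Memory projection.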
  call->init_req( TypeFunc::Memory , reset_memory() );
  call->init_req( TypeFunc::FramePtr, frameptr() );
  call->init_req( TypeFunc::ReturnAdr, top() );

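  // Record the current JVM state on the call (debug info edges) so it can be
  // used for deoptimization and exception states.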
  add_safepoint_edges(call, must_throw);

  Node* xcall = _gvn.transform(call);

  if (xcall == top()) {

// ... (code omitted in this excerpt) ...

  if (stopped()) return top(); // maybe the call folded up?

  // Note: Since any out-of-line call can produce an exception,
  // we always insert an I_O projection from the call into the result.

  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);

  if (separate_io_proj) {
    // The caller requested separate projections be used by the fall
    // through and exceptional paths, so replace the projections for
    // the fall through path.
    set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
    set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
  }

  // Capture the return value, if any.
  Node* ret;
  if (call->method() == NULL ||
      call->method()->return_type()->basic_type() == T_VOID) {
    ret = top();
  } else if (call->tf()->returns_value_type_as_fields()) {
    // Return of multiple values (value type fields): we create a
    // ValueType node; each field is a projection from the call.
    const TypeTuple* range_sig = call->tf()->range_sig();
    const Type* t = range_sig->field_at(TypeFunc::Parms);
    uint base_input = TypeFunc::Parms + 1;
    ret = ValueTypeNode::make_from_multi(this, call, t->value_klass(), base_input, false);
  } else {
    ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
  }

  return ret;
}

//--------------------set_predefined_input_for_runtime_call--------------------
// Reading and setting the memory state is way conservative here.
// The real problem is that I am not doing real Type analysis on memory,
// so I cannot distinguish card mark stores from other stores. Across a GC
// point the Store Barrier and the card mark memory have to agree. I cannot
// have a card mark store and its barrier split across the GC point from
// either above or below. Here I get that to happen by reading ALL of memory.
// A better answer would be to separate out card marks from other memory.
// For now, return the input memory state, so that it can be reused
// after the call, if this call has restricted memory effects.
Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
  // Set fixed predefined input arguments
  Node* memory = reset_memory();
  call->init_req( TypeFunc::Control, control() );
  call->init_req( TypeFunc::I_O, top() ); // does no i/o