
src/share/vm/opto/graphKit.cpp (old version)

 798     int len = (int)live_locals.size();
 799     if (!live_locals.is_valid() || len == 0)
 800       // This method is trivial, or is poisoned by a breakpoint.
 801       return true;
 802     assert(len == jvms->loc_size(), "live map consistent with locals map");
 803     for (int local = 0; local < len; local++) {
 804       if (!live_locals.at(local) && map->local(jvms, local) != top()) {
 805         if (PrintMiscellaneous && (Verbose || WizardMode)) {
 806           tty->print_cr("Zombie local %d: ", local);
 807           jvms->dump();
 808         }
 809         return false;
 810       }
 811     }
 812   }
 813   return true;
 814 }
 815 
 816 #endif //ASSERT
 817 
 818 // Helper function for enforcing certain bytecodes to reexecute if
 819 // deoptimization happens
 820 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
 821   ciMethod* cur_method = jvms->method();
 822   int       cur_bci   = jvms->bci();
 823   if (cur_method != NULL && cur_bci != InvocationEntryBci) {
 824     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
 825     return Interpreter::bytecode_should_reexecute(code) ||
 826            (is_anewarray && code == Bytecodes::_multianewarray);
 827     // Reexecute _multianewarray bytecode which was replaced with
 828     // sequence of [a]newarray. See Parse::do_multianewarray().
 829     //
 830     // Note: the interpreter should not have the reexecute bit set, since
 831     // this optimization is limited by dimensions and guarded by a flag, so
 832     // in some cases multianewarray() runtime calls will be generated and
 833     // the bytecode should not be reexecuted (the stack will not be reset).
 834   } else
 835     return false;
 836 }
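
// [Editorial note, not part of the patch] A usage sketch of the helper above,
// mirroring how add_safepoint_edges() applies it below: the reexecute bit is
// only set when the cloned JVMState has not already pinned it either way.
//
//   if (out_jvms->is_reexecute_undefined() &&
//       should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
//     out_jvms->set_should_reexecute(true);   // youngest_jvms is not changed
//   }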
 837 
 838 // Helper function for adding JVMState and debug information to node
 839 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
 840   // Add the safepoint edges to the call (or other safepoint).
 841 
 842   // Make sure dead locals are set to top.  This
 843   // should help register allocation time and cut down on the size
 844   // of the deoptimization information.
 845   assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
 846 
 847   // Walk the inline list to fill in the correct set of JVMStates.
 848   // Also fill in the associated edges for each JVMState.
 849 
 850   // If the bytecode needs to be reexecuted, we need to put
 851   // the arguments back on the stack.
 852   const bool should_reexecute = jvms()->should_reexecute();
 853   JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();
 854 
 855   // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
 856   // undefined if the bci is different.  This is normal for Parse but it
 857   // should not happen for LibraryCallKit because only one bci is processed.


 861   // If we are guaranteed to throw, we can prune everything but the
 862   // input to the current bytecode.
 863   bool can_prune_locals = false;
 864   uint stack_slots_not_pruned = 0;
 865   int inputs = 0, depth = 0;
 866   if (must_throw) {
 867     assert(method() == youngest_jvms->method(), "sanity");
 868     if (compute_stack_effects(inputs, depth)) {
 869       can_prune_locals = true;
 870       stack_slots_not_pruned = inputs;
 871     }
 872   }
 873 
 874   if (env()->should_retain_local_variables()) {
 875     // At any safepoint, this method can get breakpointed, which would
 876     // then require an immediate deoptimization.
 877     can_prune_locals = false;  // do not prune locals
 878     stack_slots_not_pruned = 0;
 879   }
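
  // [Editorial example, assumed values] At an 'athrow' site, for instance,
  // compute_stack_effects() below reports depth == -1 and inputs == 1 (the
  // thrown oop), so with must_throw only that single stack slot escapes
  // pruning; the remaining locals and stack slots may be left as top().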
 880 
 881   // do not scribble on the input jvms
 882   JVMState* out_jvms = youngest_jvms->clone_deep(C);
 883   call->set_jvms(out_jvms); // Start jvms list for call node
 884 
 885   // For a known set of bytecodes, the interpreter should reexecute them if
 886   // deoptimization happens. We set the reexecute state for them here.
 887   if (out_jvms->is_reexecute_undefined() && //don't change if already specified
 888       should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
 889     out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
 890   }
 891 
 892   // Presize the call:
 893   DEBUG_ONLY(uint non_debug_edges = call->req());
 894   call->add_req_batch(top(), youngest_jvms->debug_depth());
 895   assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
 896 
 897   // Set up edges so that the call looks like this:
 898   //  Call [state:] ctl io mem fptr retadr
 899   //       [parms:] parm0 ... parmN
 900   //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
 901   //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
 902   //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
 903   // Note that caller debug info precedes callee debug info.
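
  // [Editorial sketch, assumed sizes] Example for a 2-deep inline chain:
  // if the root frame has 3 locals, 2 stack slots and no monitors, and the
  // youngest frame has 2 locals and 1 stack slot, the appended edges are
  //   [root:]  loc0 loc1 loc2 stk0 stk1   (debug_size() == 5)
  //   [young:] loc0 loc1 stk0             (debug_size() == 3)
  // giving debug_depth() == 8; the fill pointer starts at call->req() and
  // walks backwards one frame slice at a time.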
 904 
 905   // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
 906   uint debug_ptr = call->req();
 907 
 908   // Loop over the map input edges associated with jvms, add them
 909   // to the call node, and reset all offsets to match the call node's array.
 910   for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
 911     uint debug_end   = debug_ptr;
 912     uint debug_start = debug_ptr - in_jvms->debug_size();
 913     debug_ptr = debug_start;  // back up the ptr
 914 
 915     uint p = debug_start;  // walks forward in [debug_start, debug_end)
 916     uint j, k, l;
 917     SafePointNode* in_map = in_jvms->map();
 918     out_jvms->set_map(call);
 919 
 920     if (can_prune_locals) {
 921       assert(in_jvms->method() == out_jvms->method(), "sanity");
 922       // If the current throw can reach an exception handler in this JVMS,
 923       // then we must keep everything live that can reach that handler.
 924       // As a quick and dirty approximation, we look for any handlers at all.
 925       if (in_jvms->method()->has_exception_handlers()) {
 926         can_prune_locals = false;
 927       }
 928     }
 929 
 930     // Add the Locals
 931     k = in_jvms->locoff();
 932     l = in_jvms->loc_size();
 933     out_jvms->set_locoff(p);
 934     if (!can_prune_locals) {
 935       for (j = 0; j < l; j++)
 936         call->set_req(p++, in_map->in(k+j));
 937     } else {
 938       p += l;  // already set to top above by add_req_batch
 939     }
 940 
 941     // Add the Expression Stack
 942     k = in_jvms->stkoff();
 943     l = in_jvms->sp();
 944     out_jvms->set_stkoff(p);
 945     if (!can_prune_locals) {
 946       for (j = 0; j < l; j++)
 947         call->set_req(p++, in_map->in(k+j));
 948     } else if (can_prune_locals && stack_slots_not_pruned != 0) {
 949       // Divide stack into {S0,...,S1}, where S0 is set to top.
 950       uint s1 = stack_slots_not_pruned;
 951       stack_slots_not_pruned = 0;  // for next iteration
 952       if (s1 > l)  s1 = l;
 953       uint s0 = l - s1;
 954       p += s0;  // skip the tops preinstalled by add_req_batch
 955       for (j = s0; j < l; j++)
 956         call->set_req(p++, in_map->in(k+j));
 957     } else {
 958       p += l;  // already set to top above by add_req_batch
 959     }
 960 
 961     // Add the Monitors
 962     k = in_jvms->monoff();
 963     l = in_jvms->mon_size();
 964     out_jvms->set_monoff(p);
 965     for (j = 0; j < l; j++)
 966       call->set_req(p++, in_map->in(k+j));
 967 
 968     // Copy any scalar object fields.
 969     k = in_jvms->scloff();
 970     l = in_jvms->scl_size();
 971     out_jvms->set_scloff(p);
 972     for (j = 0; j < l; j++)
 973       call->set_req(p++, in_map->in(k+j));
 974 
 975     // Finish the new jvms.
 976     out_jvms->set_endoff(p);
 977 
 978     assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
 979     assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
 980     assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
 981     assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
 982     assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
 983     assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
 984 
 985     // Update the two tail pointers in parallel.
 986     out_jvms = out_jvms->caller();
 987     in_jvms  = in_jvms->caller();
 988   }
 989 
 990   assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
 991 
 992   // Test the correctness of JVMState::debug_xxx accessors:
 993   assert(call->jvms()->debug_start() == non_debug_edges, "");
 994   assert(call->jvms()->debug_end()   == call->req(), "");
 995   assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
 996 }
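
// [Editorial sketch, not part of graphKit.cpp] The per-frame offset
// bookkeeping maintained by the loop above, restated as a tiny standalone
// model. The accessors mirror the JVMState layout; the struct itself is
// hypothetical ('uint' is HotSpot's typedef for unsigned int).

struct FrameSliceSketch {
  uint locoff;                 // where this frame's debug info begins
  uint loc_size;               // locals
  uint sp;                     // expression stack slots
  uint mon_size;               // monitors
  uint scl_size;               // scalar-replaced object fields
  uint stkoff() const { return locoff   + loc_size; }   // locals, then stack
  uint monoff() const { return stkoff() + sp; }         // then monitors
  uint scloff() const { return monoff() + mon_size; }   // then scalarized objs
  uint endoff() const { return scloff() + scl_size; }
  uint debug_size() const { return endoff() - locoff; }
};

// The asserts at the end of the loop check exactly this layout:
// debug_start + debug_size() == debug_end holds for every frame walked.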
 997 
 998 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
 999   Bytecodes::Code code = java_bc();
1000   if (code == Bytecodes::_wide) {
1001     code = method()->java_code_at_bci(bci() + 1);
1002   }
1003 
1004   BasicType rtype = T_ILLEGAL;
1005   int       rsize = 0;
1006 
1007   if (code != Bytecodes::_illegal) {
1008     depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
1009     rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
1010     if (rtype < T_CONFLICT)
1011       rsize = type2size[rtype];
1012   }
1013 
1014   switch (code) {
1015   case Bytecodes::_illegal:


1714     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1715   }
1716   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1717   return ld;
1718 }
1719 
1720 //-------------------------set_arguments_for_java_call-------------------------
1721 // Arguments (pre-popped from the stack) are taken from the JVMS.
1722 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1723   // Add the call arguments:
1724   const TypeTuple* domain = call->tf()->domain_sig();
1725   uint nargs = domain->cnt();
1726   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1727     Node* arg = argument(i-TypeFunc::Parms);
1728     if (ValueTypePassFieldsAsArgs) {
1729       if (arg->is_ValueType()) {
1730         ValueTypeNode* vt = arg->as_ValueType();
1731         if (domain->field_at(i)->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
1732           // We don't pass value type arguments by reference but instead
1733           // pass each field of the value type
1734           idx += vt->set_arguments_for_java_call(call, idx, *this);
1735         } else {
1736           arg = arg->as_ValueType()->store_to_memory(this);
1737           call->init_req(idx, arg);
1738           idx++;
1739         }
1740         // If a value type argument is passed as fields, attach the Method* to the call site
1741         // to be able to access the extended signature later via attached_method_before_pc().
1742         // For example, see CompiledMethod::preserve_callee_argument_oops().
1743         call->set_override_symbolic_info(true);
1744       } else {
1745         call->init_req(idx, arg);
1746         idx++;
1747       }
1748     } else {
1749       if (arg->is_ValueType()) {
1750         // Pass value type argument via oop to callee
1751         arg = arg->as_ValueType()->store_to_memory(this);
1752       }
1753       call->init_req(i, arg);
1754     }


1777   }
1778   assert(xcall == call, "call identity is stable");
1779 
1780   // Re-use the current map to produce the result.
1781 
1782   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1783   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1784   set_all_memory_call(xcall, separate_io_proj);
1785 
1786   //return xcall;   // no need, caller already has it
1787 }
1788 
1789 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
1790   if (stopped())  return top();  // maybe the call folded up?
1791 
1792   // Capture the return value, if any.
1793   Node* ret;
1794   if (call->method() == NULL ||
1795       call->method()->return_type()->basic_type() == T_VOID)
1796         ret = top();
1797   else  ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1798 
1799   // Note:  Since any out-of-line call can produce an exception,
1800   // we always insert an I_O projection from the call into the result.
1801 
1802   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
1803 
1804   if (separate_io_proj) {
1805     // The caller requested separate projections be used by the fall
1806     // through and exceptional paths, so replace the projections for
1807     // the fall through path.
1808     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1809     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1810   }
1811   return ret;
1812 }
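
// [Editorial usage sketch, hypothetical call site] The helpers above are
// typically used together when a kit wires up a Java call:
//
//   set_arguments_for_java_call(call);           // copy args from the JVMS
//   set_edges_for_java_call(call, must_throw);   // ctl/io/mem + debug edges
//   Node* ret = set_results_for_java_call(call);
//   push_node(method()->return_type()->basic_type(), ret);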
1813 
1814 //--------------------set_predefined_input_for_runtime_call--------------------
1815 // Reading and setting the memory state is way conservative here.
1816 // The real problem is that I am not doing real Type analysis on memory,
1817 // so I cannot distinguish card mark stores from other stores.  Across a GC

src/share/vm/opto/graphKit.cpp (new version)

 798     int len = (int)live_locals.size();
 799     if (!live_locals.is_valid() || len == 0)
 800       // This method is trivial, or is poisoned by a breakpoint.
 801       return true;
 802     assert(len == jvms->loc_size(), "live map consistent with locals map");
 803     for (int local = 0; local < len; local++) {
 804       if (!live_locals.at(local) && map->local(jvms, local) != top()) {
 805         if (PrintMiscellaneous && (Verbose || WizardMode)) {
 806           tty->print_cr("Zombie local %d: ", local);
 807           jvms->dump();
 808         }
 809         return false;
 810       }
 811     }
 812   }
 813   return true;
 814 }
 815 
 816 #endif //ASSERT
 817 
 818 // Helper function for adding JVMState and debug information to node
 819 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
 820   // Add the safepoint edges to the call (or other safepoint).
 821 
 822   // Make sure dead locals are set to top.  This
 823   // should help register allocation time and cut down on the size
 824   // of the deoptimization information.
 825   assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
 826 
 827   // Walk the inline list to fill in the correct set of JVMStates.
 828   // Also fill in the associated edges for each JVMState.
 829 
 830   // If the bytecode needs to be reexecuted, we need to put
 831   // the arguments back on the stack.
 832   const bool should_reexecute = jvms()->should_reexecute();
 833   JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();
 834 
 835   // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
 836   // undefined if the bci is different.  This is normal for Parse but it
 837   // should not happen for LibraryCallKit because only one bci is processed.


 841   // If we are guaranteed to throw, we can prune everything but the
 842   // input to the current bytecode.
 843   bool can_prune_locals = false;
 844   uint stack_slots_not_pruned = 0;
 845   int inputs = 0, depth = 0;
 846   if (must_throw) {
 847     assert(method() == youngest_jvms->method(), "sanity");
 848     if (compute_stack_effects(inputs, depth)) {
 849       can_prune_locals = true;
 850       stack_slots_not_pruned = inputs;
 851     }
 852   }
 853 
 854   if (env()->should_retain_local_variables()) {
 855     // At any safepoint, this method can get breakpointed, which would
 856     // then require an immediate deoptimization.
 857     can_prune_locals = false;  // do not prune locals
 858     stack_slots_not_pruned = 0;
 859   }
 860 
 861   C->add_safepoint_edges(call, youngest_jvms, can_prune_locals, stack_slots_not_pruned);
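
  // [Editorial note] The frame-walking loop that filled in the debug edges
  // here in the old version now lives behind Compile::add_safepoint_edges();
  // GraphKit only computes the pruning inputs and delegates.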
 862 }
 863 
 864 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
 865   Bytecodes::Code code = java_bc();
 866   if (code == Bytecodes::_wide) {
 867     code = method()->java_code_at_bci(bci() + 1);
 868   }
 869 
 870   BasicType rtype = T_ILLEGAL;
 871   int       rsize = 0;
 872 
 873   if (code != Bytecodes::_illegal) {
 874     depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
 875     rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
 876     if (rtype < T_CONFLICT)
 877       rsize = type2size[rtype];
 878   }
 879 
 880   switch (code) {
 881   case Bytecodes::_illegal:


1580     elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1581   }
1582   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1583   return ld;
1584 }
1585 
1586 //-------------------------set_arguments_for_java_call-------------------------
1587 // Arguments (pre-popped from the stack) are taken from the JVMS.
1588 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1589   // Add the call arguments:
1590   const TypeTuple* domain = call->tf()->domain_sig();
1591   uint nargs = domain->cnt();
1592   for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1593     Node* arg = argument(i-TypeFunc::Parms);
1594     if (ValueTypePassFieldsAsArgs) {
1595       if (arg->is_ValueType()) {
1596         ValueTypeNode* vt = arg->as_ValueType();
1597         if (domain->field_at(i)->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
1598           // We don't pass value type arguments by reference but instead
1599           // pass each field of the value type
1600           idx += vt->pass_fields(call, idx, *this);
1601         } else {
1602           arg = arg->as_ValueType()->store_to_memory(this);
1603           call->init_req(idx, arg);
1604           idx++;
1605         }
1606         // If a value type argument is passed as fields, attach the Method* to the call site
1607         // to be able to access the extended signature later via attached_method_before_pc().
1608         // For example, see CompiledMethod::preserve_callee_argument_oops().
1609         call->set_override_symbolic_info(true);
1610       } else {
1611         call->init_req(idx, arg);
1612         idx++;
1613       }
1614     } else {
1615       if (arg->is_ValueType()) {
1616         // Pass value type argument via oop to callee
1617         arg = arg->as_ValueType()->store_to_memory(this);
1618       }
1619       call->init_req(i, arg);
1620     }


1643   }
1644   assert(xcall == call, "call identity is stable");
1645 
1646   // Re-use the current map to produce the result.
1647 
1648   set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1649   set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
1650   set_all_memory_call(xcall, separate_io_proj);
1651 
1652   //return xcall;   // no need, caller already has it
1653 }
1654 
1655 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
1656   if (stopped())  return top();  // maybe the call folded up?
1657 
1658   // Capture the return value, if any.
1659   Node* ret;
1660   if (call->method() == NULL ||
1661       call->method()->return_type()->basic_type() == T_VOID)
1662         ret = top();
1663   else {
1664     if (!call->tf()->returns_value_type_as_fields()) {
1665       ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1666     } else {
1667       // Return of multiple values (value type fields): we create a
1668       // ValueType node; each field is a projection from the call.
1669       const TypeTuple *range_sig = call->tf()->range_sig();
1670       const Type* t = range_sig->field_at(TypeFunc::Parms);
1671       assert(t->isa_valuetypeptr(), "only value types for multiple return values");
1672       ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
1673       ret = C->create_vt_node(call, vk, vk, 0, TypeFunc::Parms+1, false);
1674     }
1675   }
1676 
1677   // Note:  Since any out-of-line call can produce an exception,
1678   // we always insert an I_O projection from the call into the result.
1679 
1680   make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
1681 
1682   if (separate_io_proj) {
1683     // The caller requested separate projections be used by the fall
1684     // through and exceptional paths, so replace the projections for
1685     // the fall through path.
1686     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1687     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1688   }
1689   return ret;
1690 }
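
// [Editorial sketch, assumed shape] When a value type is returned as fields,
// slot TypeFunc::Parms carries the value type pointer entry of the signature
// and each field comes back as its own projection one slot later, e.g. for a
// hypothetical two-field value klass:
//
//   Node* f0 = _gvn.transform(new ProjNode(call, TypeFunc::Parms + 1));
//   Node* f1 = _gvn.transform(new ProjNode(call, TypeFunc::Parms + 2));
//
// create_vt_node() above is assumed to gather these into one ValueTypeNode.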
1691 
1692 //--------------------set_predefined_input_for_runtime_call--------------------
1693 // Reading and setting the memory state is way conservative here.
1694 // The real problem is that I am not doing real Type analysis on memory,
1695 // so I cannot distinguish card mark stores from other stores.  Across a GC