src/share/vm/opto/graphKit.cpp
@@ -813,30 +813,10 @@
return true;
}
#endif //ASSERT
-// Helper function for enforcing certain bytecodes to reexecute if
-// deoptimization happens
-static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
- ciMethod* cur_method = jvms->method();
- int cur_bci = jvms->bci();
- if (cur_method != NULL && cur_bci != InvocationEntryBci) {
- Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
- return Interpreter::bytecode_should_reexecute(code) ||
- is_anewarray && code == Bytecodes::_multianewarray;
- // Reexecute _multianewarray bytecode which was replaced with
- // sequence of [a]newarray. See Parse::do_multianewarray().
- //
- // Note: interpreter should not have it set since this optimization
- // is limited by dimensions and guarded by flag so in some cases
- // multianewarray() runtime calls will be generated and
- // the bytecode should not be reexecuted (stack will not be reset).
- } else
- return false;
-}
-
// Helper function for adding JVMState and debug information to node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
// Add the safepoint edges to the call (or other safepoint).
// Make sure dead locals are set to top. This
@@ -876,125 +856,11 @@
// then require an immediate deoptimization.
can_prune_locals = false; // do not prune locals
stack_slots_not_pruned = 0;
}
- // do not scribble on the input jvms
- JVMState* out_jvms = youngest_jvms->clone_deep(C);
- call->set_jvms(out_jvms); // Start jvms list for call node
-
- // For a known set of bytecodes, the interpreter should reexecute them if
- // deoptimization happens. We set the reexecute state for them here
- if (out_jvms->is_reexecute_undefined() && //don't change if already specified
- should_reexecute_implied_by_bytecode(out_jvms, call->is_AllocateArray())) {
- out_jvms->set_should_reexecute(true); //NOTE: youngest_jvms not changed
- }
-
- // Presize the call:
- DEBUG_ONLY(uint non_debug_edges = call->req());
- call->add_req_batch(top(), youngest_jvms->debug_depth());
- assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
-
- // Set up edges so that the call looks like this:
- // Call [state:] ctl io mem fptr retadr
- // [parms:] parm0 ... parmN
- // [root:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
- // [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
- // [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
- // Note that caller debug info precedes callee debug info.
-
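// Added illustration (hypothetical sizes, not from any real frame): if the
// call has one caller frame with debug_size() == 8 and a youngest frame with
// debug_size() == 6, then debug_depth() == 14 and add_req_batch() above
// appended 14 top() edges. debug_ptr starts at call->req(); the loop below
// walks from the youngest frame to the root, backing debug_ptr up by one
// frame's debug_size() per pass and overwriting the preinstalled top()s, so
// caller debug info ends up before callee debug info as the diagram notes.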
- // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
- uint debug_ptr = call->req();
-
- // Loop over the map input edges associated with jvms, add them
- // to the call node, & reset all offsets to match call node array.
- for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
- uint debug_end = debug_ptr;
- uint debug_start = debug_ptr - in_jvms->debug_size();
- debug_ptr = debug_start; // back up the ptr
-
- uint p = debug_start; // walks forward in [debug_start, debug_end)
- uint j, k, l;
- SafePointNode* in_map = in_jvms->map();
- out_jvms->set_map(call);
-
- if (can_prune_locals) {
- assert(in_jvms->method() == out_jvms->method(), "sanity");
- // If the current throw can reach an exception handler in this JVMS,
- // then we must keep everything live that can reach that handler.
- // As a quick and dirty approximation, we look for any handlers at all.
- if (in_jvms->method()->has_exception_handlers()) {
- can_prune_locals = false;
- }
- }
-
- // Add the Locals
- k = in_jvms->locoff();
- l = in_jvms->loc_size();
- out_jvms->set_locoff(p);
- if (!can_prune_locals) {
- for (j = 0; j < l; j++)
- call->set_req(p++, in_map->in(k+j));
- } else {
- p += l; // already set to top above by add_req_batch
- }
-
- // Add the Expression Stack
- k = in_jvms->stkoff();
- l = in_jvms->sp();
- out_jvms->set_stkoff(p);
- if (!can_prune_locals) {
- for (j = 0; j < l; j++)
- call->set_req(p++, in_map->in(k+j));
- } else if (can_prune_locals && stack_slots_not_pruned != 0) {
- // Divide stack into {S0,...,S1}, where S0 is set to top.
- uint s1 = stack_slots_not_pruned;
- stack_slots_not_pruned = 0; // for next iteration
- if (s1 > l) s1 = l;
- uint s0 = l - s1;
- p += s0; // skip the tops preinstalled by add_req_batch
- for (j = s0; j < l; j++)
- call->set_req(p++, in_map->in(k+j));
- } else {
- p += l; // already set to top above by add_req_batch
- }
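// Added illustration (hypothetical numbers): with l == 5 live stack slots and
// stack_slots_not_pruned == 2, s1 == 2 and s0 == 3; slots 0..2 keep the top()
// preinstalled by add_req_batch() and only the two youngest slots, 3 and 4,
// are copied from the input map.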
-
- // Add the Monitors
- k = in_jvms->monoff();
- l = in_jvms->mon_size();
- out_jvms->set_monoff(p);
- for (j = 0; j < l; j++)
- call->set_req(p++, in_map->in(k+j));
-
- // Copy any scalar object fields.
- k = in_jvms->scloff();
- l = in_jvms->scl_size();
- out_jvms->set_scloff(p);
- for (j = 0; j < l; j++)
- call->set_req(p++, in_map->in(k+j));
-
- // Finish the new jvms.
- out_jvms->set_endoff(p);
-
- assert(out_jvms->endoff() == debug_end, "fill ptr must match");
- assert(out_jvms->depth() == in_jvms->depth(), "depth must match");
- assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match");
- assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
- assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
- assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
-
- // Update the two tail pointers in parallel.
- out_jvms = out_jvms->caller();
- in_jvms = in_jvms->caller();
- }
-
- assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
-
- // Test the correctness of JVMState::debug_xxx accessors:
- assert(call->jvms()->debug_start() == non_debug_edges, "");
- assert(call->jvms()->debug_end() == call->req(), "");
- assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
+ C->add_safepoint_edges(call, youngest_jvms, can_prune_locals, stack_slots_not_pruned);
}
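The debug-edge construction that used to live inline above is now reached through Compile. The single call that replaces it implies a declaration roughly like the sketch below; the exact signature and its home (compile.hpp) are assumptions, not shown in this diff:

    // Assumed declaration, inferred from the call site above: clones the
    // youngest JVMState chain onto the safepoint and fills in the debug
    // edges, as GraphKit::add_safepoint_edges previously did inline.
    void add_safepoint_edges(SafePointNode* call, JVMState* youngest_jvms,
                             bool can_prune_locals, uint stack_slots_not_pruned);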
bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
Bytecodes::Code code = java_bc();
if (code == Bytecodes::_wide) {
@@ -1729,11 +1595,11 @@
if (arg->is_ValueType()) {
ValueTypeNode* vt = arg->as_ValueType();
if (domain->field_at(i)->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
// We don't pass value type arguments by reference but instead
// pass each field of the value type
- idx += vt->set_arguments_for_java_call(call, idx, *this);
+ idx += vt->pass_fields(call, idx, *this);
} else {
arg = arg->as_ValueType()->store_to_memory(this);
call->init_req(idx, arg);
idx++;
}
@@ -1792,11 +1658,23 @@
// Capture the return value, if any.
Node* ret;
if (call->method() == NULL ||
call->method()->return_type()->basic_type() == T_VOID)
ret = top();
- else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
+ else {
+ if (!call->tf()->returns_value_type_as_fields()) {
+ ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
+ } else {
+ // Return of multiple values (value type fields): we create a
+ // ValueType node, each field is a projection from the call.
+ const TypeTuple *range_sig = call->tf()->range_sig();
+ const Type* t = range_sig->field_at(TypeFunc::Parms);
+ assert(t->isa_valuetypeptr(), "only value types for multiple return values");
+ ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
+ ret = C->create_vt_node(call, vk, vk, 0, TypeFunc::Parms+1, false);
+ }
+ }
// Note: Since any out-of-line call can produce an exception,
// we always insert an I_O projection from the call into the result.
make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
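For the new multi-value return path, create_vt_node presumably backs each field of the resulting ValueTypeNode with its own projection off the call, which is why the base index TypeFunc::Parms+1 is passed above. A sketch of the assumed wiring, not the real Compile::create_vt_node implementation:

    // Sketch only (assumed): one projection per returned field, starting
    // right after the base result at TypeFunc::Parms.
    //   field 0 <- _gvn.transform(new ProjNode(call, TypeFunc::Parms + 1));
    //   field 1 <- _gvn.transform(new ProjNode(call, TypeFunc::Parms + 2));
    //   ...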