< prev index next >
src/share/vm/opto/callGenerator.cpp
Print this page
@@ -115,18 +115,27 @@
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
private:
CallStaticJavaNode* _call_node;
// Force separate memory and I/O projections for the exceptional
- // paths to facilitate late inlinig.
+ // paths to facilitate late inlining.
bool _separate_io_proj;
public:
DirectCallGenerator(ciMethod* method, bool separate_io_proj)
: CallGenerator(method),
_separate_io_proj(separate_io_proj)
{
+ if (method->is_method_handle_intrinsic() &&
+ method->signature()->return_type() == ciEnv::current()->___Value_klass()) {
+ // If that call has not been optimized by the time optimizations
+ // are over, we'll need to add a call to create a value type
+ // instance from the klass returned by the call. Separating
+ // memory and I/O projections for exceptions is required to
+ // perform that graph transformation.
+ _separate_io_proj = true;
+ }
}
virtual JVMState* generate(JVMState* jvms);
CallStaticJavaNode* call_node() const { return _call_node; }
};
@@ -171,15 +180,20 @@
}
kit.set_arguments_for_java_call(call);
kit.set_edges_for_java_call(call, false, _separate_io_proj);
Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
// Check if return value is a value type pointer
- if (gvn.type(ret)->isa_valuetypeptr()) {
+ const TypeValueTypePtr* vtptr = gvn.type(ret)->isa_valuetypeptr();
+ if (vtptr != NULL) {
+ if (vtptr->klass() != kit.C->env()->___Value_klass()) {
// Create ValueTypeNode from the oop and replace the return value
Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
kit.push_node(T_VALUETYPE, vt);
} else {
+ kit.push_node(T_VALUETYPE, ret);
+ }
+ } else {
kit.push_node(method()->return_type()->basic_type(), ret);
}
return kit.transfer_exceptions_into_jvms();
}
@@ -427,11 +441,11 @@
}
map->set_argument(jvms, i1, arg);
} else {
if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
- Node* vt = C->create_vt_node(call, vk, vk, 0, j);
+ Node* vt = C->create_vt_node(call, vk, vk, 0, j, true);
map->set_argument(jvms, i1, gvn.transform(vt));
j += vk->value_arg_slots();
} else {
map->set_argument(jvms, i1, call->in(j));
j++;
@@ -478,11 +492,19 @@
C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
C->env()->notice_inlined_method(_inline_cg->method());
C->set_inlining_progress(true);
if (result->is_ValueType()) {
+ const TypeTuple *range_cc = call->tf()->range_cc();
+ const TypeTuple *range_sig = call->tf()->range_sig();
+ if (range_cc == range_sig) {
result = result->as_ValueType()->store_to_memory(&kit);
+ } else {
+ // Return of multiple values (the fields of a value type)
+ ValueTypeNode* vt = result->as_ValueType();
+ vt->replace_call_results(call, C);
+ }
}
kit.replace_call(call, result, true);
}
@@ -519,19 +541,19 @@
}
};
bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
- CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
+ CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const, AlwaysIncrementalInline);
Compile::current()->print_inlining_update_delayed(this);
if (!_input_not_const) {
_attempt++;
}
- if (cg != NULL && cg->is_inline()) {
+ if (cg != NULL && (cg->is_inline() || cg->is_inlined_method_handle_intrinsic(jvms, cg->method()))) {
assert(!cg->is_late_inline(), "we're doing late inlining");
_inline_cg = cg;
Compile::current()->dec_number_of_mh_late_inlines();
return true;
}
@@ -829,11 +851,11 @@
CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
bool input_not_const;
- CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
+ CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const, false);
Compile* C = Compile::current();
if (cg != NULL) {
if (!delayed_forbidden && AlwaysIncrementalInline) {
return CallGenerator::for_late_inline(callee, cg);
} else {
@@ -842,20 +864,20 @@
}
int bci = jvms->bci();
ciCallProfile profile = caller->call_profile_at_bci(bci);
int call_site_count = caller->scale_count(profile.count());
- if (IncrementalInline && call_site_count > 0 &&
- (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
+ if (IncrementalInline && (AlwaysIncrementalInline ||
+ (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
} else {
// Out-of-line call.
return CallGenerator::for_direct_call(callee);
}
}
-CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
+CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) {
GraphKit kit(jvms);
PhaseGVN& gvn = kit.gvn();
Compile* C = kit.C;
vmIntrinsics::ID iid = callee->intrinsic_id();
input_not_const = true;
@@ -878,11 +900,14 @@
CallGenerator* cg = C->call_generator(target, vtable_index,
false /* call_does_dispatch */,
jvms,
true /* allow_inline */,
- PROB_ALWAYS);
+ PROB_ALWAYS,
+ NULL,
+ true,
+ delayed_forbidden);
return cg;
} else {
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
"receiver not constant");
}
@@ -962,11 +987,13 @@
speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
}
CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
true /* allow_inline */,
PROB_ALWAYS,
- speculative_receiver_type);
+ speculative_receiver_type,
+ true,
+ delayed_forbidden);
return cg;
} else {
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
"member_name not constant");
}
< prev index next >