--- old/src/share/vm/opto/callGenerator.cpp	2017-05-29 18:07:43.883026121 +0200
+++ new/src/share/vm/opto/callGenerator.cpp	2017-05-29 18:07:43.808026202 +0200
@@ -117,7 +117,7 @@
  private:
   CallStaticJavaNode* _call_node;
   // Force separate memory and I/O projections for the exceptional
-  // paths to facilitate late inlinig.
+  // paths to facilitate late inlining.
   bool _separate_io_proj;
 
  public:
@@ -125,6 +125,15 @@
     : CallGenerator(method),
       _separate_io_proj(separate_io_proj)
   {
+    if (method->is_method_handle_intrinsic() &&
+        method->signature()->return_type() == ciEnv::current()->___Value_klass()) {
+      // If that call has not been optimized by the time optimizations
+      // are over, we'll need to add a call to create a value type
+      // instance from the klass returned by the call. Separating
+      // memory and I/O projections for exceptions is required to
+      // perform that graph transformation.
+      _separate_io_proj = true;
+    }
   }
   virtual JVMState* generate(JVMState* jvms);
 
@@ -173,10 +182,15 @@
   kit.set_edges_for_java_call(call, false, _separate_io_proj);
   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
   // Check if return value is a value type pointer
-  if (gvn.type(ret)->isa_valuetypeptr()) {
-    // Create ValueTypeNode from the oop and replace the return value
-    Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
-    kit.push_node(T_VALUETYPE, vt);
+  const TypeValueTypePtr* vtptr = gvn.type(ret)->isa_valuetypeptr();
+  if (vtptr != NULL) {
+    if (vtptr->klass() != kit.C->env()->___Value_klass()) {
+      // Create ValueTypeNode from the oop and replace the return value
+      Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
+      kit.push_node(T_VALUETYPE, vt);
+    } else {
+      kit.push_node(T_VALUETYPE, ret);
+    }
   } else {
     kit.push_node(method()->return_type()->basic_type(), ret);
   }
@@ -429,7 +443,7 @@
   } else {
     if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
       ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
-      Node* vt = C->create_vt_node(call, vk, vk, 0, j);
+      Node* vt = C->create_vt_node(call, vk, vk, 0, j, true);
       map->set_argument(jvms, i1, gvn.transform(vt));
       j += vk->value_arg_slots();
     } else {
@@ -480,7 +494,15 @@
   C->set_inlining_progress(true);
 
   if (result->is_ValueType()) {
-    result = result->as_ValueType()->store_to_memory(&kit);
+    const TypeTuple *range_cc = call->tf()->range_cc();
+    const TypeTuple *range_sig = call->tf()->range_sig();
+    if (range_cc == range_sig) {
+      result = result->as_ValueType()->store_to_memory(&kit);
+    } else {
+      // Return of multiple values (the fields of a value type)
+      ValueTypeNode* vt = result->as_ValueType();
+      vt->replace_call_results(call, C);
+    }
   }
 
   kit.replace_call(call, result, true);
@@ -520,16 +542,16 @@
 };
 
 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
-
-  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
+
+  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const, AlwaysIncrementalInline);
 
   Compile::current()->print_inlining_update_delayed(this);
 
   if (!_input_not_const) {
     _attempt++;
   }
-
-  if (cg != NULL && cg->is_inline()) {
+
+  if (cg != NULL && (cg->is_inline() || cg->is_inlined_method_handle_intrinsic(jvms, cg->method()))) {
     assert(!cg->is_late_inline(), "we're doing late inlining");
     _inline_cg = cg;
     Compile::current()->dec_number_of_mh_late_inlines();
@@ -831,7 +853,7 @@
 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
   bool input_not_const;
-  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
+  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const, false);
   Compile* C = Compile::current();
   if (cg != NULL) {
     if (!delayed_forbidden && AlwaysIncrementalInline) {
@@ -844,8 +866,8 @@
   ciCallProfile profile = caller->call_profile_at_bci(bci);
   int call_site_count = caller->scale_count(profile.count());
 
-  if (IncrementalInline && call_site_count > 0 &&
-      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
+  if (IncrementalInline && (AlwaysIncrementalInline ||
+      (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
   } else {
     // Out-of-line call.
@@ -853,7 +875,7 @@
   }
 }
 
-CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
+CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
   Compile* C = kit.C;
@@ -880,7 +902,10 @@
                                             false /* call_does_dispatch */,
                                             jvms,
                                             true /* allow_inline */,
-                                            PROB_ALWAYS);
+                                            PROB_ALWAYS,
+                                            NULL,
+                                            true,
+                                            delayed_forbidden);
       return cg;
     } else {
       print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
@@ -964,7 +989,9 @@
       CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                             true /* allow_inline */,
                                             PROB_ALWAYS,
-                                            speculative_receiver_type);
+                                            speculative_receiver_type,
+                                            true,
+                                            delayed_forbidden);
       return cg;
     } else {
       print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),