 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

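// A call site is treated as an inlined method handle intrinsic when the
// symbolic info at the site (what the bytecode resolves to) is a method
// handle intrinsic, but the actual target is a regular method, i.e. the
// MH.linkTo*/invokeBasic adapter has been optimized away.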
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call; // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size); // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

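// Factory: inline the callee by parsing its bytecodes into the caller's
// graph; returns NULL when the method's bytecodes cannot be parsed.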
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL) return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

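// Called during late inlining: rebuilds a JVMState and argument map at the
// recorded call node and, provided do_late_inline_check() still approves,
// replaces the call with the parsed body of the callee. A pure call whose
// result is unused is simply removed.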
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call node is dead or its control has gone away.
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

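  // If any argument (or the incoming memory state) is dead (top), the call
  // site is unreachable; don't try to inline it. As the asserts note, this
  // can only happen during incremental inlining.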
  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if (callprojs.fallthrough_catchproj == call->in(0) ||
      callprojs.catchall_catchproj == call->in(0) ||
      callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
  if (_is_pure_call && result_not_used) {
    // The call is marked as pure (no important side effects), but result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // This check is done here because for_method_handle_inline() method
    // needs jvms for inlined state.
    if (!do_late_inline_check(jvms)) {
      map->disconnect_inputs(NULL, C);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = _inline_cg->generate(jvms);
    if (new_jvms == NULL) return; // no change
    if (C->failing()) return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
    C->env()->notice_inlined_method(_inline_cg->method());
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true);
  }
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

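// Late-inline generator for MethodHandle linker/invoker call sites. When the
// MH/MemberName argument is not yet constant, it parks itself on the call node
// and retries (via do_late_inline_check) once the input becomes constant;
// otherwise it enqueues itself for the regular late-inlining pass.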
class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

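  // Defer to late inlining when profiling says the site is reached and either
  // the MH input is not constant yet, we are not yet in the incremental
  // inlining pass, or the inlining budget has been exceeded; otherwise emit a
  // plain out-of-line call.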
  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders. When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size(); // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int vtable_index = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node* receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters. They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

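// A call site is treated as an inlined method handle intrinsic when the
// symbolic info at the site (what the bytecode resolves to) is a method
// handle intrinsic, but the actual target is a regular method, i.e. the
// MH.linkTo*/invokeBasic adapter has been optimized away.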
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
    if (ValueTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
      // If that call has not been optimized by the time optimizations are over,
      // we'll need to add a call to create a value type instance from the klass
      // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
      // Separating memory and I/O projections for exceptions is required to
      // perform that graph transformation.
      _separate_io_proj = true;
    }
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call; // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call, is_late_inline());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (!receiver->is_ValueType() && kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size); // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

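// Factory: inline the callee by parsing its bytecodes into the caller's
// graph; returns NULL when the method's bytecodes cannot be parsed.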
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL) return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

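// Called during late inlining: rebuilds a JVMState and argument map at the
// recorded call node and, provided do_late_inline_check() still approves,
// replaces the call with the parsed body of the callee. A pure call whose
// result is unused is simply removed.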
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call node is dead or its control has gone away.
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

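  // If any argument (or the incoming memory state) is dead (top), the call
  // site is unreachable; don't try to inline it. As the asserts note, this
  // can only happen during incremental inlining.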
  const TypeTuple *r = call->tf()->domain_cc();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections* callprojs = call->extract_projections(true);
  if (callprojs->fallthrough_catchproj == call->in(0) ||
      callprojs->catchall_catchproj == call->in(0) ||
      callprojs->fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs->catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs->fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs->catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs->exobj != NULL && call->find_edge(callprojs->exobj) != -1)) {
    return;
  }
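  // A call that returns a value type as multiple fields has one result
  // projection per returned field; the result counts as unused only when
  // none of these projections has uses. A result projection feeding back
  // into the call indicates an unreachable loop, as above.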
  bool result_not_used = true;
  for (uint i = 0; i < callprojs->nb_resproj; i++) {
    if (callprojs->resproj[i] != NULL) {
      if (callprojs->resproj[i]->outcnt() != 0) {
        result_not_used = false;
      }
      if (call->find_edge(callprojs->resproj[i]) != -1) {
        return;
      }
    }
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  if (_is_pure_call && result_not_used) {
    // The call is marked as pure (no important side effects), but result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    PhaseGVN& gvn = *C->initial_gvn();
    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      gvn.set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) {
      map->set_req(i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    const TypeTuple *domain_sig = call->_tf->domain_sig();
    ExtendedSignature sig_cc = ExtendedSignature(method()->get_sig_cc(), SigEntryFilter());
    uint nargs = method()->arg_size();
    assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");

    uint j = TypeFunc::Parms;
    for (uint i1 = 0; i1 < nargs; i1++) {
      const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
      if (method()->has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) {
        // Value type arguments are not passed by reference: we get an argument per
        // field of the value type. Build ValueTypeNodes from the value type arguments.
        GraphKit arg_kit(jvms, &gvn);
        arg_kit.set_control(map->control());
        ValueTypeNode* vt = ValueTypeNode::make_from_multi(&arg_kit, call, sig_cc, t->value_klass(), j, true);
        map->set_control(arg_kit.control());
        map->set_argument(jvms, i1, vt);
      } else {
        map->set_argument(jvms, i1, call->in(j++));
        BasicType bt = t->basic_type();
        while (SigEntry::next_is_reserved(sig_cc, bt, true)) {
          j += type2size[bt]; // Skip reserved arguments
        }
      }
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // This check is done here because for_method_handle_inline() method
    // needs jvms for inlined state.
    if (!do_late_inline_check(jvms)) {
      map->disconnect_inputs(NULL, C);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = _inline_cg->generate(jvms);
    if (new_jvms == NULL) return; // no change
    if (C->failing()) return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
    C->env()->notice_inlined_method(_inline_cg->method());
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup

    // Handle value type returns
    bool returned_as_fields = call->tf()->returns_value_type_as_fields();
    if (result->is_ValueType()) {
      ValueTypeNode* vt = result->as_ValueType();
      if (returned_as_fields) {
        // Return of multiple values (the fields of a value type)
        vt->replace_call_results(&kit, call, C);
        if (vt->is_allocated(&gvn) && !StressValueTypeReturnedAsFields) {
          result = vt->get_oop();
        } else {
          result = vt->tagged_klass(gvn);
        }
      } else {
        result = ValueTypePtrNode::make_from_value_type(&kit, vt);
      }
    } else if (gvn.type(result)->is_valuetypeptr() && returned_as_fields) {
      const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms);
      Node* cast = new CheckCastPPNode(NULL, result, vt_t);
      gvn.record_for_igvn(cast);
      ValueTypePtrNode* vtptr = ValueTypePtrNode::make_from_oop(&kit, gvn.transform(cast));
      vtptr->replace_call_results(&kit, call, C);
      result = cast;
    }

    kit.replace_call(call, result, true);
  }
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

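// Late-inline generator for MethodHandle linker/invoker call sites. When the
// MH/MemberName argument is not yet constant, it parks itself on the call node
// and retries (via do_late_inline_check) once the input becomes constant;
// otherwise it enqueues itself for the regular late-inlining pass.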
class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const, AlwaysIncrementalInline);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && (cg->is_inline() || cg->is_inlined_method_handle_intrinsic(jvms, cg->method()))) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Allocate value types if they are merged with objects (similar to Parse::merge_common())
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
    if (m->is_ValueType() && !t->isa_valuetype()) {
      // Allocate value type in fast path
      m = ValueTypePtrNode::make_from_value_type(&kit, m->as_ValueType());
      kit.map()->set_req(i, m);
    }
    if (n->is_ValueType() && !t->isa_valuetype()) {
      // Allocate value type in slow path
      PreserveJVMState pjvms(&kit);
      kit.set_map(slow_map);
      n = ValueTypePtrNode::make_from_value_type(&kit, n->as_ValueType());
      kit.map()->set_req(i, n);
      slow_map = kit.stop();
    }
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const, false);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

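  // Defer to late inlining when AlwaysIncrementalInline is set, or when the
  // site is reached and either the MH input is not constant yet, we are not
  // yet in the incremental inlining pass, or the inlining budget has been
  // exceeded; otherwise emit a plain out-of-line call.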
  if (IncrementalInline && (AlwaysIncrementalInline ||
                            (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

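// Cast the argument at position arg_nb to the type the (erased) signature
// demands, keeping any speculative part of its type. A non-nullable value
// type argument is additionally scalarized if its klass allows it.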
static void cast_argument(int nargs, int arg_nb, ciType* t, GraphKit& kit) {
  PhaseGVN& gvn = kit.gvn();
  Node* arg = kit.argument(arg_nb);
  const Type* arg_type = arg->bottom_type();
  const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
  if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
    const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
    arg = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
    kit.set_argument(arg_nb, arg);
  }
  if (sig_type->is_valuetypeptr() && !arg->is_ValueType() &&
      !kit.gvn().type(arg)->maybe_null() && t->as_value_klass()->is_scalarizable()) {
    arg = ValueTypeNode::make_from_oop(&kit, arg, t->as_value_klass());
    kit.set_argument(arg_nb, arg);
  }
}

962
963 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) {
964 GraphKit kit(jvms);
965 PhaseGVN& gvn = kit.gvn();
966 Compile* C = kit.C;
967 vmIntrinsics::ID iid = callee->intrinsic_id();
968 input_not_const = true;
969 switch (iid) {
970 case vmIntrinsics::_invokeBasic:
971 {
972 // Get MethodHandle receiver:
973 Node* receiver = kit.argument(0);
974 if (receiver->Opcode() == Op_ConP) {
975 input_not_const = false;
976 const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
977 ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
978 const int vtable_index = Method::invalid_vtable_index;
979
980 if (!ciMethod::is_consistent_info(callee, target)) {
981 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
982 "signatures mismatch");
983 return NULL;
984 }
985
986 CallGenerator* cg = C->call_generator(target, vtable_index,
987 false /* call_does_dispatch */,
988 jvms,
989 true /* allow_inline */,
990 PROB_ALWAYS,
991 NULL,
992 true,
993 delayed_forbidden);
994 return cg;
995 } else {
996 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
997 "receiver not constant");
998 }
999 }
1000 break;
1001
1002 case vmIntrinsics::_linkToVirtual:
1003 case vmIntrinsics::_linkToStatic:
1004 case vmIntrinsics::_linkToSpecial:
1005 case vmIntrinsics::_linkToInterface:
1006 {
1007 int nargs = callee->arg_size();
1008 // Get MemberName argument:
1009 Node* member_name = kit.argument(nargs - 1);
1010 if (member_name->Opcode() == Op_ConP) {
1011 input_not_const = false;
1012 const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1013 ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1014
1015 if (!ciMethod::is_consistent_info(callee, target)) {
1016 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1017 "signatures mismatch");
1018 return NULL;
1019 }
1020
1021 // In lambda forms we erase signature types to avoid resolving issues
1022 // involving class loaders. When we optimize a method handle invoke
1023 // to a direct call we must cast the receiver and arguments to its
1024 // actual types.
1025 ciSignature* signature = target->signature();
1026 const int receiver_skip = target->is_static() ? 0 : 1;
1027 // Cast receiver to its type.
1028 if (!target->is_static()) {
1029 cast_argument(nargs, 0, signature->accessing_klass(), kit);
1030 }
1031 // Cast reference arguments to its type.
1032 for (int i = 0, j = 0; i < signature->count(); i++) {
1033 ciType* t = signature->type_at(i);
1034 if (t->is_klass()) {
1035 cast_argument(nargs, receiver_skip + j, t, kit);
1036 }
1037 j += t->size(); // long and double take two slots
1038 }
1039
1040 // Try to get the most accurate receiver type
1041 const bool is_virtual = (iid == vmIntrinsics::_linkToVirtual);
1042 const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1043 int vtable_index = Method::invalid_vtable_index;
1044 bool call_does_dispatch = false;
1045
1046 ciKlass* speculative_receiver_type = NULL;
1047 if (is_virtual_or_interface) {
1048 ciInstanceKlass* klass = target->holder();
1049 Node* receiver_node = kit.argument(0);
1050 const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1051 // call_does_dispatch and vtable_index are out-parameters. They might be changed.
1052 // optimize_virtual_call() takes 2 different holder
1053 // arguments for a corner case that doesn't apply here (see
1054 // Parse::do_call())
1055 target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
1056 target, receiver_type, is_virtual,
1057 call_does_dispatch, vtable_index, // out-parameters
1058 false /* check_access */);
1059 // We lack profiling at this call but type speculation may
1060 // provide us with a type
1061 speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
1062 }
1063 CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1064 !StressMethodHandleLinkerInlining /* allow_inline */,
1065 PROB_ALWAYS,
1066 speculative_receiver_type,
1067 true,
1068 delayed_forbidden);
1069 return cg;
1070 } else {
1071 print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
1072 "member_name not constant");
1073 }
1074 }
1075 break;
1076
1077 default:
1078 fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
1079 break;
1080 }
1081 return NULL;
1082 }


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;