109 // Simply return the exit state of the parser,
110 // augmented by any exceptional states.
111 return exits.transfer_exceptions_into_jvms();
112 }
113
114 //---------------------------DirectCallGenerator------------------------------
115 // Internal class which handles all out-of-line calls w/o receiver type checks.
116 class DirectCallGenerator : public CallGenerator {
117 private:
118 CallStaticJavaNode* _call_node;
119 // Force separate memory and I/O projections for the exceptional
120 // paths to facilitate late inlining.
121 bool _separate_io_proj;
122
123 public:
124 DirectCallGenerator(ciMethod* method, bool separate_io_proj)
125 : CallGenerator(method),
126 _separate_io_proj(separate_io_proj)
127 {
128 if (method->is_method_handle_intrinsic() &&
129 method->signature()->return_type()->is__Value()) {
130 // If that call has not been optimized by the time optimizations
131 // are over, we'll need to add a call to create a value type
132 // instance from the klass returned by the call. Separating
133 // memory and I/O projections for exceptions is required to
134 // perform that graph transformation.
135 _separate_io_proj = true;
136 }
137 }
138 virtual JVMState* generate(JVMState* jvms);
139
140 CallStaticJavaNode* call_node() const { return _call_node; }
141 };
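// Note: a DirectCallGenerator is obtained via CallGenerator::for_direct_call()
// (used by the method handle inlining heuristic further below). When
// _separate_io_proj is set, the exceptional path of the call keeps its own
// memory and i_o projections instead of a merged exception state, roughly
// (simplified sketch, projection shapes approximate):
//
//   CallStaticJava
//    |-- Proj(ctrl) .............. fall-through path
//    |-- Catch
//         |-- CatchProj (exceptional) --> Proj(i_o), Proj(memory)
//
// which lets a late-inlining pass splice in a replacement subgraph without
// first having to untangle a combined exception state.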
142
143 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
144 GraphKit kit(jvms);
145 kit.C->print_inlining_update(this);
146 PhaseGVN& gvn = kit.gvn();
147 bool is_static = method()->is_static();
148 address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
149 : SharedRuntime::get_resolve_opt_virtual_call_stub();
167     // Since we share a map with the caller, its JVMS gets adjusted.
168 kit.null_check_receiver_before_call(method());
169 }
170 if (kit.stopped()) {
171 // And dump it back to the caller, decorated with any exceptions:
172 return kit.transfer_exceptions_into_jvms();
173 }
174 // Mark the call node as virtual, sort of:
175 call->set_optimized_virtual(true);
176 if (method()->is_method_handle_intrinsic() ||
177 method()->is_compiled_lambda_form()) {
178 call->set_method_handle_invoke(true);
179 }
180 }
181 kit.set_arguments_for_java_call(call);
182 kit.set_edges_for_java_call(call, false, _separate_io_proj);
183 Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
184 // Check if return value is a value type pointer
185 const TypeValueTypePtr* vtptr = gvn.type(ret)->isa_valuetypeptr();
186 if (vtptr != NULL) {
187 if (!vtptr->is__Value()) {
188 // Create ValueTypeNode from the oop and replace the return value
189 Node* vt = ValueTypeNode::make(&kit, ret);
190 kit.push_node(T_VALUETYPE, vt);
191 } else {
192 kit.push_node(T_VALUETYPE, ret);
193 }
194 } else {
195 kit.push_node(method()->return_type()->basic_type(), ret);
196 }
197 return kit.transfer_exceptions_into_jvms();
198 }
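// Note on the value-type return handling above: for a concrete value type
// (anything but __Value) the returned oop is eagerly re-expanded into a
// ValueTypeNode. For __Value itself no concrete klass is known at compile
// time, so the bare oop is pushed as-is; per the constructor comment above,
// such calls are fixed up after optimizations are over, which is why they
// force _separate_io_proj.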
199
200 //--------------------------VirtualCallGenerator------------------------------
201 // Internal class which handles all out-of-line calls checking receiver type.
202 class VirtualCallGenerator : public CallGenerator {
203 private:
204 int _vtable_index;
205 public:
206 VirtualCallGenerator(ciMethod* method, int vtable_index)
207 : CallGenerator(method), _vtable_index(vtable_index)
208 {
209 assert(vtable_index == Method::invalid_vtable_index ||
262 assert(!method()->is_final(), "virtual call should not be to final");
263 assert(!method()->is_private(), "virtual call should not be to private");
264 assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
265 "no vtable calls if +UseInlineCaches ");
266 address target = SharedRuntime::get_resolve_virtual_call_stub();
267 // Normal inline cache used for call
268 CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
269 if (is_inlined_method_handle_intrinsic(jvms, method())) {
270       // To be able to issue a direct call (optimized virtual or virtual)
271       // and skip a call to the MH.linkTo*/invokeBasic adapter, additional
272       // information about the invoked method must be attached to the call site
273       // for the resolution logic to work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
274 call->set_override_symbolic_info(true);
275 }
276 kit.set_arguments_for_java_call(call);
277 kit.set_edges_for_java_call(call);
278 Node* ret = kit.set_results_for_java_call(call);
279 // Check if return value is a value type pointer
280 if (gvn.type(ret)->isa_valuetypeptr()) {
281 // Create ValueTypeNode from the oop and replace the return value
282 Node* ctl = kit.control();
283 Node* vt = ValueTypeNode::make(&kit, ret);
284 kit.set_control(ctl);
285 kit.push_node(T_VALUETYPE, vt);
286 } else {
287 kit.push_node(method()->return_type()->basic_type(), ret);
288 }
289
290 // Represent the effect of an implicit receiver null_check
291 // as part of this call. Since we share a map with the caller,
292   // its JVMS gets adjusted.
293 kit.cast_not_null(receiver);
294 return kit.transfer_exceptions_into_jvms();
295 }
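// Note: this generator always dispatches through the inline-cache resolution
// stub. Per the assert at the top of generate(), _vtable_index is only valid
// when inline caches are disabled (-XX:-UseInlineCaches); in that case the
// index carried by the CallDynamicJavaNode presumably lets the backend emit
// a vtable-indexed dispatch instead of an IC.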
296
297 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
298 if (InlineTree::check_can_parse(m) != NULL) return NULL;
299 return new ParseGenerator(m, expected_uses);
300 }
301
302 // As a special case, the JVMS passed to this CallGenerator is
303 // for the method execution already in progress, not just the JVMS
304 // of the caller. Thus, this CallGenerator cannot be mixed with others!
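// In other words (simplified): instead of receiving the caller's JVMS with
// the arguments still on the caller's stack, this generator is handed the
// callee's own in-progress state, so the code below rebuilds the callee's
// map and expression stack in place rather than pushing a new frame.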
422 // blow away old call arguments
423 Node* top = C->top();
424 for (uint i1 = TypeFunc::Parms; i1 < call->_tf->domain_cc()->cnt(); i1++) {
425 map->set_req(i1, top);
426 }
427 jvms->set_map(map);
428
429 // Make enough space in the expression stack to transfer
430 // the incoming arguments and return value.
431 map->ensure_stack(jvms, jvms->method()->max_stack());
432 const TypeTuple *domain_sig = call->_tf->domain_sig();
433 uint nargs = method()->arg_size();
434 assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
435
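// Note: two views of the signature are in play here. domain_sig mirrors the
// declared Java signature (one entry per argument), while domain_cc is the
// calling-convention view in which, under ValueTypePassFieldsAsArgs, a value
// type argument is scalarized into vk->value_arg_slots() separate inputs.
// Below, i1 walks the declared arguments while the cursor j walks the
// scalarized call inputs.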
436 uint j = TypeFunc::Parms;
437 for (uint i1 = 0; i1 < nargs; i1++) {
438 const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
439 if (!ValueTypePassFieldsAsArgs) {
440 Node* arg = call->in(TypeFunc::Parms + i1);
441 if (t->isa_valuetypeptr()) {
442 Node* ctl = map->control();
443 arg = ValueTypeNode::make(gvn, ctl, map->memory(), arg);
444 map->set_control(ctl);
445 }
446 map->set_argument(jvms, i1, arg);
447 } else {
448 if (t->isa_valuetypeptr() && !t->is_valuetypeptr()->is__Value()) {
449 ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
450 Node* ctl = map->control();
451 Node* vt = ValueTypeNode::make(gvn, ctl, map->memory(), call, vk, j, true);
452 map->set_control(ctl);
453 map->set_argument(jvms, i1, gvn.transform(vt));
454 j += vk->value_arg_slots();
455 } else {
456 map->set_argument(jvms, i1, call->in(j));
457 j++;
458 }
459 }
460 }
461
462 C->print_inlining_assert_ready();
463
464 C->print_inlining_move_to(this);
465
466 C->log_late_inline(this);
467
468   // This check is done here because the for_method_handle_inline() method
469   // needs the JVMS of the inlined state.
470 if (!do_late_inline_check(jvms)) {
471 map->disconnect_inputs(NULL, C);
472 return;
485 if (new_jvms == NULL) return; // no change
486 if (C->failing()) return;
487
488 // Capture any exceptional control flow
489 GraphKit kit(new_jvms);
490
491 // Find the result object
492 Node* result = C->top();
493 ciType* return_type = _inline_cg->method()->return_type();
494 int result_size = return_type->size();
495 if (result_size != 0 && !kit.stopped()) {
496 result = (result_size == 1) ? kit.pop() : kit.pop_pair();
497 }
498
499 C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
500 C->env()->notice_inlined_method(_inline_cg->method());
501 C->set_inlining_progress(true);
502
503 if (return_type->is_valuetype()) {
504 const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms);
505 bool returned_as_fields = call->tf()->returns_value_type_as_fields();
506 if (result->is_ValueType()) {
507 ValueTypeNode* vt = result->as_ValueType();
508 if (!returned_as_fields) {
509 result = vt->allocate(&kit)->get_oop();
510 result = gvn.transform(new ValueTypePtrNode(vt, result, C));
511 } else {
512 // Return of multiple values (the fields of a value type)
513 vt->replace_call_results(&kit, call, C);
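// If the value type has not been allocated (its oop input is still the null
// constant), hand back the klass pointer in tagged form instead -- the tag
// presumably lets the caller distinguish "fields returned in registers"
// from a plain oop.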
514 if (gvn.type(vt->get_oop()) == TypePtr::NULL_PTR) {
515 result = vt->tagged_klass(gvn);
516 } else {
517 result = vt->get_oop();
518 }
519 }
520 } else if (gvn.type(result)->isa_valuetypeptr() && returned_as_fields) {
521 assert(!vt_t->is_valuetypeptr()->is__Value(), "__Value not supported");
522 Node* cast = new CheckCastPPNode(NULL, result, vt_t);
523 gvn.record_for_igvn(cast);
524 Node* ctl = kit.control();
525 ValueTypePtrNode* vtptr = ValueTypePtrNode::make(gvn, ctl, kit.merged_memory(), gvn.transform(cast));
526 kit.set_control(ctl);
527 vtptr->replace_call_results(&kit, call, C);
528 result = cast;
529 } else if (!return_type->is__Value()) {
530 assert(result->is_top(), "what else?");
531 for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
532 ProjNode *pn = call->fast_out(i)->as_Proj();
533 uint con = pn->_con;
534 if (con >= TypeFunc::Parms) {
535 gvn.hash_delete(pn);
536 pn->set_req(0, C->top());
537 --i; --imax;
538 }
539 }
540 }
541 }
542
543 kit.replace_call(call, result, true);
544 }
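// Note: replace_call() is what finally disconnects the hanging call node and
// rewires its users -- roughly, the control/memory/i_o consumers are moved
// over to the freshly parsed subgraph and the call's result projection is
// replaced by the result computed above.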
545
546
547 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
548 return new LateInlineCallGenerator(method, inline_cg);
549 }
550
551 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
552 ciMethod* _caller;
553 int _attempt;
554 bool _input_not_const;
555
556 virtual bool do_late_inline_check(JVMState* jvms);
557 virtual bool already_attempted() const { return _attempt > 0; }
558
559 public:
560 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
561 LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
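// Note: _attempt counts how many times late inlining of this method handle
// call has been tried; already_attempted() presumably lets the framework
// retry once (e.g. after the MethodHandle receiver has become constant)
// without retrying indefinitely.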
899 }
900 int bci = jvms->bci();
901 ciCallProfile profile = caller->call_profile_at_bci(bci);
902 int call_site_count = caller->scale_count(profile.count());
903
904 if (IncrementalInline && (AlwaysIncrementalInline ||
905 (call_site_count > 0 && (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
906 return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
907 } else {
908 // Out-of-line call.
909 return CallGenerator::for_direct_call(callee);
910 }
911 }
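// Paraphrasing the decision above: with IncrementalInline on, the call is
// deferred for a late inlining attempt either unconditionally
// (AlwaysIncrementalInline) or when the site is reachable (count > 0) and
// the receiver is not yet constant, we are not already inlining
// incrementally, or the inlining budget is exhausted; otherwise the method
// handle call is emitted as a plain out-of-line direct call.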
912
913 static void cast_argument(int arg_nb, ciType* t, GraphKit& kit) {
914 PhaseGVN& gvn = kit.gvn();
915 Node* arg = kit.argument(arg_nb);
916 const Type* arg_type = arg->bottom_type();
917 const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass());
918 if (t->is_valuetype()) {
919 assert(!(arg_type->isa_valuetype() && t->is__Value()), "need a pointer to the value type");
920 if (arg_type->isa_valuetypeptr() && !t->is__Value()) {
921 Node* cast = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
922 Node* vt = ValueTypeNode::make(&kit, cast);
923 kit.set_argument(arg_nb, vt);
924 } else {
925 assert(t->is__Value() || arg->is_ValueType(), "inconsistent argument");
926 }
927 } else {
928 if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) {
929 Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
930 kit.set_argument(arg_nb, cast_obj);
931 }
932 }
933 }
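// Note: this helper exists because MethodHandle linker calls have erased,
// Object-based signatures; casting each argument to the type from the
// target's signature hands the inlined body the sharper types it expects.
// For value types the cast is additionally re-expanded into a ValueTypeNode,
// except for the untyped __Value case, which is left as-is.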
934
935 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const, bool delayed_forbidden) {
936 GraphKit kit(jvms);
937 PhaseGVN& gvn = kit.gvn();
938 Compile* C = kit.C;
939 vmIntrinsics::ID iid = callee->intrinsic_id();
940 input_not_const = true;
941 switch (iid) {
942 case vmIntrinsics::_invokeBasic:
943 {
944 // Get MethodHandle receiver:
945 Node* receiver = kit.argument(0);