CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

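// A late inline call generator wraps another CallGenerator. During the main
// parse it emits a regular out-of-line CallStaticJava; once parsing has
// finished, do_late_inline() can replace that call with the inlined body
// produced by the wrapped generator.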
// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 protected:
  CallGenerator* _inline_cg;

  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_insert(this);
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
  }
};

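// Replace the CallStaticJava recorded at parse time with the inlined method
// body: validate that the call site is still live, rebuild a SafePointNode
// and JVMState that mimic the state at the original invoke, run the wrapped
// generator on that state, and splice the result in via replace_call().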
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call site has gone dead
  if (call_node() == NULL || call_node()->outcnt() == 0 ||
      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
    return;

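  // Bail out if any argument or the memory state has become top: the call
  // site is dead, which can only happen once incremental inlining has
  // started transforming the graph.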
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call_node()->in(TypeFunc::Parms + i1)->is_top()) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call_node()->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  CallStaticJavaNode* call = call_node();

  // Make a clone of the JVMState that is appropriate for driving a parse
  Compile* C = Compile::current();
  JVMState* jvms = call->jvms()->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  // Make enough space for the expression stack and transfer the incoming arguments
  int nargs = method()->arg_size();
  jvms->set_map(map);
  map->ensure_stack(jvms, jvms->method()->max_stack());
  if (nargs > 0) {
    for (int i1 = 0; i1 < nargs; i1++) {
      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
    }
  }

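  // Give subclasses a chance to veto the inline now that the state has been
  // reconstructed (the method handle variant below re-checks whether the
  // call target has become a constant). On failure, release the cloned map
  // and leave the out-of-line call in place.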
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  C->print_inlining_insert(this);

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Set up default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return; // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

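// Late inlining of method handle invokes. The MethodHandle receiver or
// MemberName argument may not be a compile time constant on the first
// attempt; such call sites are kept around so the inline can be retried
// once incremental inlining turns the input into a constant.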
class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
    if (_input_not_const) {
      // Inlining won't be possible yet, so there is no need to enqueue
      // this generator right now; just remember it on the call node.
      call_node()->_cg = this;
    } else {
      Compile::current()->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    if (!_input_not_const) return;
    LateInlineCallGenerator::print_inlining_late(msg);
  }
};

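// Retry the method handle inline: if the receiver or MemberName has become
// constant by now, adopt the resulting generator and perform the inline;
// otherwise leave the call site registered for another attempt.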
bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL) {
    assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->_cg = this;
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

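// Late inlining for the String concatenation optimization: these call sites
// are collected on a dedicated worklist (add_string_late_inline) so they can
// be expanded together after the main inlining pass.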
class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}


//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info = ci;
    _if_cold = if_cold;
    _if_hot = if_hot;

// ...
  uint limit = slow_map->req();
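  // Merge each JVM state slot that differs between the two incoming maps
  // with a two-input Phi on the shared region.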
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


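// Decide how to compile a method handle invoke: inline it immediately when
// the target can be resolved to a constant, optionally defer it for
// incremental inlining, or fall back to a plain direct call.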
CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

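  // Immediate inlining failed. If the call site is warm and a later attempt
  // may still succeed, queue it for late inlining; otherwise just emit the
  // direct call to the method handle intrinsic.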
  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    return CallGenerator::for_direct_call(callee);
  }
}

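// Try to replace a method handle intrinsic (invokeBasic or one of the
// linkTo* linkers) with a call generator for its constant target method.
// Sets input_not_const when the MethodHandle receiver or MemberName argument
// is not a compile time constant yet; returns NULL if no inlineable
// generator could be produced.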
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, true, true);
        assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders. When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + i);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + i, cast_obj);
            }
          }
        }
        const int vtable_index = Method::invalid_vtable_index;
        const bool call_is_virtual = target->is_abstract();  // FIXME workaround
        CallGenerator* cg = C->call_generator(target, vtable_index, call_is_virtual, jvms, true, PROB_ALWAYS, true, true);
        assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  default:
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
    break;
  }
  return NULL;
}


//------------------------PredictedIntrinsicGenerator------------------------------
// Internal class which handles all predicted Intrinsic calls.
class PredictedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;
