46 return TypeFunc::make(method());
47 }
48
49 //-----------------------------ParseGenerator---------------------------------
50 // Internal class which handles all direct bytecode traversal.
51 class ParseGenerator : public InlineCallGenerator {
52 private:
53 bool _is_osr;
54 float _expected_uses;
55
56 public:
57 ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
58 : InlineCallGenerator(method)
59 {
60 _is_osr = is_osr;
61 _expected_uses = expected_uses;
62 assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
63 }
64
65 virtual bool is_parse() const { return true; }
66 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
67 int is_osr() { return _is_osr; }
68
69 };
70
// Parse the method's bytecodes into the current graph and return the exit
// JVMState (augmented with any exception states), or NULL on bailout.
JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for a OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL; // bailing out of the compile; do not try to parse
  }

  // The traversal work appears to happen inside the Parse constructor;
  // its exit state is read back immediately below.
  Parse parser(jvms, method(), _expected_uses, parent_parser);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    // Bailout during parsing: drain any pending exception states before
    // giving up so no state is left dangling.
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}
107
108 //---------------------------DirectCallGenerator------------------------------
109 // Internal class which handles all out-of-line calls w/o receiver type checks.
110 class DirectCallGenerator : public CallGenerator {
111 private:
112 CallStaticJavaNode* _call_node;
113 // Force separate memory and I/O projections for the exceptional
114 // paths to facilitate late inlinig.
115 bool _separate_io_proj;
116
117 public:
118 DirectCallGenerator(ciMethod* method, bool separate_io_proj)
119 : CallGenerator(method),
120 _separate_io_proj(separate_io_proj)
121 {
122 }
123 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
124
125 CallStaticJavaNode* call_node() const { return _call_node; }
126 };
127
// Emit an out-of-line (non-inlined) call to the target method.
// NOTE(review): original file lines 149-155 are not visible in this chunk,
// so the tail of this routine below the null check is reproduced as-is.
JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  // Static calls resolve through the static-call stub; instance calls use
  // the optimized virtual stub (receiver type check is done elsewhere).
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  _call_node = call; // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  // Wire up arguments, control/memory/io edges, and the result projection.
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
162
163 //--------------------------VirtualCallGenerator------------------------------
164 // Internal class which handles all out-of-line calls checking receiver type.
165 class VirtualCallGenerator : public CallGenerator {
166 private:
167 int _vtable_index;
168 public:
169 VirtualCallGenerator(ciMethod* method, int vtable_index)
170 : CallGenerator(method), _vtable_index(vtable_index)
171 {
172 assert(vtable_index == Method::invalid_vtable_index ||
173 vtable_index >= 0, "either invalid or usable");
174 }
175 virtual bool is_virtual() const { return true; }
176 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
177 };
178
// Emit an out-of-line virtual call; traps immediately if the receiver is a
// known null. (Routine continues beyond this chunk.)
JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size()); // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
266
267 // Allow inlining decisions to be delayed
268 class LateInlineCallGenerator : public DirectCallGenerator {
269 private:
270 // unique id for log compilation
271 jlong _unique_id;
272
273 protected:
274 CallGenerator* _inline_cg;
275 virtual bool do_late_inline_check(JVMState* jvms) { return true; }
276
277 public:
278 LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
279 DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}
280
281 virtual bool is_late_inline() const { return true; }
282
283 // Convert the CallStaticJava into an inline
284 virtual void do_late_inline();
285
286 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
287 Compile *C = Compile::current();
288
289 C->log_inline_id(this);
290
291 // Record that this call site should be revisited once the main
292 // parse is finished.
293 if (!is_mh_late_inline()) {
294 C->add_late_inline(this);
295 }
296
297 // Emit the CallStaticJava and request separate projections so
298 // that the late inlining logic can distinguish between fall
299 // through and exceptional uses of the memory and io projections
300 // as is done for allocations and macro expansion.
301 return DirectCallGenerator::generate(jvms, parent_parser);
302 }
303
304 virtual void print_inlining_late(const char* msg) {
305 CallNode* call = call_node();
306 Compile* C = Compile::current();
307 C->print_inlining_assert_ready();
308 C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
309 C->print_inlining_move_to(this);
310 C->print_inlining_update_delayed(this);
311 }
312
313 virtual void set_unique_id(jlong id) {
314 _unique_id = id;
315 }
316
317 virtual jlong unique_id() const {
318 return _unique_id;
319 }
320 };
321
  // NOTE(review): this is the tail of LateInlineCallGenerator::do_late_inline();
  // the head of the routine is outside this chunk.
  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
  if (new_jvms == NULL) return; // no change
  if (C->failing()) return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  // Pop the inlined method's return value (one or two stack slots).
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  // Book-keeping: propagate loop info, record the inline, mark progress.
  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  // Splice the inlined subgraph in place of the original call node.
  kit.replace_call(call, result);
}
422
423
424 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
425 return new LateInlineCallGenerator(method, inline_cg);
426 }
427
428 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
429 ciMethod* _caller;
430 int _attempt;
431 bool _input_not_const;
432
433 virtual bool do_late_inline_check(JVMState* jvms);
434 virtual bool already_attempted() const { return _attempt > 0; }
435
436 public:
437 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
438 LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
439
440 virtual bool is_mh_late_inline() const { return true; }
441
442 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
443 JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
444
445 Compile* C = Compile::current();
446 if (_input_not_const) {
447 // inlining won't be possible so no need to enqueue right now.
448 call_node()->set_generator(this);
449 } else {
450 C->add_late_inline(this);
451 }
452 return new_jvms;
453 }
454 };
455
// Decide whether the deferred MethodHandle inline can proceed now.
// NOTE(review): original file lines 464-468 are not visible in this chunk;
// the code between _attempt++ and the decrement is elided from this view.
bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  // Keep the call associated with this generator for a later retry.
  call_node()->set_generator(this);
  return false;
}
476
477 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
478 Compile::current()->inc_number_of_mh_late_inlines();
479 CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
480 return cg;
481 }
482
483 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
484
485 public:
486 LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
487 LateInlineCallGenerator(method, inline_cg) {}
488
489 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
490 Compile *C = Compile::current();
491
492 C->log_inline_id(this);
493
494 C->add_string_late_inline(this);
495
496 JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
497 return new_jvms;
498 }
499
500 virtual bool is_string_late_inline() const { return true; }
501 };
502
503 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
504 return new LateInlineStringCallGenerator(method, inline_cg);
505 }
506
507 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
508
509 public:
510 LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
511 LateInlineCallGenerator(method, inline_cg) {}
512
513 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
514 Compile *C = Compile::current();
515
516 C->log_inline_id(this);
517
518 C->add_boxing_late_inline(this);
519
520 JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
521 return new_jvms;
522 }
523 };
524
525 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
526 return new LateInlineBoxingCallGenerator(method, inline_cg);
527 }
528
529 //---------------------------WarmCallGenerator--------------------------------
530 // Internal class which handles initial deferral of inlining decisions.
531 class WarmCallGenerator : public CallGenerator {
532 WarmCallInfo* _call_info;
533 CallGenerator* _if_cold;
534 CallGenerator* _if_hot;
535 bool _is_virtual; // caches virtuality of if_cold
536 bool _is_inline; // caches inline-ness of if_hot
537
538 public:
539 WarmCallGenerator(WarmCallInfo* ci,
540 CallGenerator* if_cold,
541 CallGenerator* if_hot)
542 : CallGenerator(if_cold->method())
543 {
544 assert(method() == if_hot->method(), "consistent choices");
545 _call_info = ci;
546 _if_cold = if_cold;
547 _if_hot = if_hot;
548 _is_virtual = if_cold->is_virtual();
549 _is_inline = if_hot->is_inline();
550 }
551
552 virtual bool is_inline() const { return _is_inline; }
553 virtual bool is_virtual() const { return _is_virtual; }
554 virtual bool is_deferred() const { return true; }
555
556 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
557 };
558
559
560 CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
561 CallGenerator* if_cold,
562 CallGenerator* if_hot) {
563 return new WarmCallGenerator(ci, if_cold, if_hot);
564 }
565
// Emit the cold call now; if the emitted node turns out to be a CallJava,
// queue the site for possible warm inlining later.
JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms, parent_parser);
  if (jvms != NULL) {
    // Walk back from the control node through the expected
    // CatchProj -> Catch -> Proj chain to find the call node itself;
    // any mismatch short-circuits to top and skips the queueing.
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0); else m = C->top();
    if (m->is_Catch()) m = m->in(0); else m = C->top();
    if (m->is_Proj()) m = m->in(0); else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      // Rank the site by heat and insert into the compile's sorted list.
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
  // Constructor: clamp the profiled hit probability and record the two
  // alternative generators. (The class header and member declarations are
  // outside this chunk.)
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extremes values from the range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed = if_missed;
    _if_hit = if_hit;
    _hit_prob = hit_prob;
  }

  virtual bool is_virtual() const { return true; }
  // Inline-ness and deferral are delegated to the hot-path generator.
  virtual bool is_inline() const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};
636
637
638 CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
639 CallGenerator* if_missed,
640 CallGenerator* if_hit,
641 float hit_prob) {
642 return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
643 }
644
645
// Emit: null-check the receiver, type-check it against the predicted klass,
// run the hot generator on the hit path and the missed generator on the slow
// path, then merge the two paths. (Routine continues beyond this chunk.)
JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    // Receiver was definitely null; the trap state carries the exceptions.
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver; // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  // Generate the miss path under a preserved JVMState so the fast path
  // below starts from the pre-branch state.
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance exactly does not matches the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  // Phi together every live local/stack/monitor slot of the two maps.
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
884
885 //------------------------PredictedIntrinsicGenerator------------------------------
886 // Internal class which handles all predicted Intrinsic calls.
887 class PredictedIntrinsicGenerator : public CallGenerator {
888 CallGenerator* _intrinsic;
889 CallGenerator* _cg;
890
891 public:
892 PredictedIntrinsicGenerator(CallGenerator* intrinsic,
893 CallGenerator* cg)
894 : CallGenerator(cg->method())
895 {
896 _intrinsic = intrinsic;
897 _cg = cg;
898 }
899
900 virtual bool is_virtual() const { return true; }
901 virtual bool is_inlined() const { return true; }
902 virtual bool is_intrinsic() const { return true; }
903
904 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
905 };
906
907
908 CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
909 CallGenerator* cg) {
910 return new PredictedIntrinsicGenerator(intrinsic, cg);
911 }
912
913
// Emit the intrinsic's predicate, generate the slow call on the predicate's
// failure path and the intrinsic on its success path, then merge.
// (Routine continues beyond this chunk.)
JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
  if (kit.failing())
    return NULL;  // might happen because of NodeCountInliningCutoff

  kit.C->print_inlining_update(this);
  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  if (slow_ctl != NULL) {
    // Build the slow (non-intrinsic) call under a preserved JVMState.
    PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Predicate is always false.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Generate intrinsic code:
  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
  if (new_jvms == NULL) {
    // Intrinsic failed, so use slow code or make a direct call.
    if (slow_map == NULL) {
      CallGenerator* cg = CallGenerator::for_direct_call(method());
      new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
    } else {
      kit.set_jvms(slow_jvms);
      return kit.transfer_exceptions_into_jvms();
    }
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Intrinsic method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }
1008
1009 //-------------------------UncommonTrapCallGenerator-----------------------------
1010 // Internal class which handles all out-of-line calls checking receiver type.
1011 class UncommonTrapCallGenerator : public CallGenerator {
1012 Deoptimization::DeoptReason _reason;
1013 Deoptimization::DeoptAction _action;
1014
1015 public:
1016 UncommonTrapCallGenerator(ciMethod* m,
1017 Deoptimization::DeoptReason reason,
1018 Deoptimization::DeoptAction action)
1019 : CallGenerator(m)
1020 {
1021 _reason = reason;
1022 _action = action;
1023 }
1024
1025 virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
1026 virtual bool is_trap() const { return true; }
1027
1028 virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
1029 };
1030
1031
1032 CallGenerator*
1033 CallGenerator::for_uncommon_trap(ciMethod* m,
1034 Deoptimization::DeoptReason reason,
1035 Deoptimization::DeoptAction action) {
1036 return new UncommonTrapCallGenerator(m, reason, action);
1037 }
1038
1039
1040 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
1041 GraphKit kit(jvms);
1042 kit.C->print_inlining_update(this);
1043 // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
1044 int nargs = method()->arg_size();
1045 kit.inc_sp(nargs);
1046 assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1047 if (_reason == Deoptimization::Reason_class_check &&
1048 _action == Deoptimization::Action_maybe_recompile) {
1049 // Temp fix for 6529811
1050 // Don't allow uncommon_trap to override our decision to recompile in the event
1051 // of a class cast failure for a monomorphic call as it will never let us convert
1052 // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
1053 bool keep_exact_action = true;
1054 kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
1055 } else {
1056 kit.uncommon_trap(_reason, _action);
1057 }
1058 return kit.transfer_exceptions_into_jvms();
1059 }
1060
|
46 return TypeFunc::make(method());
47 }
48
49 //-----------------------------ParseGenerator---------------------------------
50 // Internal class which handles all direct bytecode traversal.
51 class ParseGenerator : public InlineCallGenerator {
52 private:
53 bool _is_osr;
54 float _expected_uses;
55
56 public:
57 ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
58 : InlineCallGenerator(method)
59 {
60 _is_osr = is_osr;
61 _expected_uses = expected_uses;
62 assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
63 }
64
65 virtual bool is_parse() const { return true; }
66 virtual JVMState* generate(JVMState* jvms);
67 int is_osr() { return _is_osr; }
68
69 };
70
// Parse the method's bytecodes into the current graph and return the exit
// JVMState (augmented with any exception states), or NULL on bailout.
JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for a OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL; // bailing out of the compile; do not try to parse
  }

  // The traversal work appears to happen inside the Parse constructor;
  // its exit state is read back immediately below.
  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    // Bailout during parsing: drain pending exception states before giving up.
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}
107
108 //---------------------------DirectCallGenerator------------------------------
109 // Internal class which handles all out-of-line calls w/o receiver type checks.
110 class DirectCallGenerator : public CallGenerator {
111 private:
112 CallStaticJavaNode* _call_node;
113 // Force separate memory and I/O projections for the exceptional
114 // paths to facilitate late inlinig.
115 bool _separate_io_proj;
116
117 public:
118 DirectCallGenerator(ciMethod* method, bool separate_io_proj)
119 : CallGenerator(method),
120 _separate_io_proj(separate_io_proj)
121 {
122 }
123 virtual JVMState* generate(JVMState* jvms);
124
125 CallStaticJavaNode* call_node() const { return _call_node; }
126 };
127
// Emit an out-of-line (non-inlined) call to the target method.
// NOTE(review): original file lines 149-155 are not visible in this chunk,
// so the tail of this routine below the null check is reproduced as-is.
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  // Static calls resolve through the static-call stub; instance calls use
  // the optimized virtual stub (receiver type check is done elsewhere).
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  _call_node = call; // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  // Wire up arguments, control/memory/io edges, and the result projection.
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
162
163 //--------------------------VirtualCallGenerator------------------------------
164 // Internal class which handles all out-of-line calls checking receiver type.
165 class VirtualCallGenerator : public CallGenerator {
166 private:
167 int _vtable_index;
168 public:
169 VirtualCallGenerator(ciMethod* method, int vtable_index)
170 : CallGenerator(method), _vtable_index(vtable_index)
171 {
172 assert(vtable_index == Method::invalid_vtable_index ||
173 vtable_index >= 0, "either invalid or usable");
174 }
175 virtual bool is_virtual() const { return true; }
176 virtual JVMState* generate(JVMState* jvms);
177 };
178
// Emit an out-of-line virtual call; traps immediately if the receiver is a
// known null. (Routine continues beyond this chunk.)
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size()); // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
266
267 // Allow inlining decisions to be delayed
268 class LateInlineCallGenerator : public DirectCallGenerator {
269 private:
270 // unique id for log compilation
271 jlong _unique_id;
272
273 protected:
274 CallGenerator* _inline_cg;
275 virtual bool do_late_inline_check(JVMState* jvms) { return true; }
276
277 public:
278 LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
279 DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}
280
281 virtual bool is_late_inline() const { return true; }
282
283 // Convert the CallStaticJava into an inline
284 virtual void do_late_inline();
285
286 virtual JVMState* generate(JVMState* jvms) {
287 Compile *C = Compile::current();
288
289 C->log_inline_id(this);
290
291 // Record that this call site should be revisited once the main
292 // parse is finished.
293 if (!is_mh_late_inline()) {
294 C->add_late_inline(this);
295 }
296
297 // Emit the CallStaticJava and request separate projections so
298 // that the late inlining logic can distinguish between fall
299 // through and exceptional uses of the memory and io projections
300 // as is done for allocations and macro expansion.
301 return DirectCallGenerator::generate(jvms);
302 }
303
304 virtual void print_inlining_late(const char* msg) {
305 CallNode* call = call_node();
306 Compile* C = Compile::current();
307 C->print_inlining_assert_ready();
308 C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
309 C->print_inlining_move_to(this);
310 C->print_inlining_update_delayed(this);
311 }
312
313 virtual void set_unique_id(jlong id) {
314 _unique_id = id;
315 }
316
317 virtual jlong unique_id() const {
318 return _unique_id;
319 }
320 };
321
  // NOTE(review): this is the tail of LateInlineCallGenerator::do_late_inline();
  // the head of the routine is outside this chunk.
  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL) return; // no change
  if (C->failing()) return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  // Pop the inlined method's return value (one or two stack slots).
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  // Book-keeping: propagate loop info, record the inline, mark progress.
  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  // Splice the inlined subgraph in place of the original call node.
  kit.replace_call(call, result, true);
}
422
423
424 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
425 return new LateInlineCallGenerator(method, inline_cg);
426 }
427
428 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
429 ciMethod* _caller;
430 int _attempt;
431 bool _input_not_const;
432
433 virtual bool do_late_inline_check(JVMState* jvms);
434 virtual bool already_attempted() const { return _attempt > 0; }
435
436 public:
437 LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
438 LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}
439
440 virtual bool is_mh_late_inline() const { return true; }
441
442 virtual JVMState* generate(JVMState* jvms) {
443 JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
444
445 Compile* C = Compile::current();
446 if (_input_not_const) {
447 // inlining won't be possible so no need to enqueue right now.
448 call_node()->set_generator(this);
449 } else {
450 C->add_late_inline(this);
451 }
452 return new_jvms;
453 }
454 };
455
456 bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {
457
458 CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);
459
460 Compile::current()->print_inlining_update_delayed(this);
461
462 if (!_input_not_const) {
463 _attempt++;
469 Compile::current()->dec_number_of_mh_late_inlines();
470 return true;
471 }
472
473 call_node()->set_generator(this);
474 return false;
475 }
476
477 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
478 Compile::current()->inc_number_of_mh_late_inlines();
479 CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
480 return cg;
481 }
482
483 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
484
485 public:
486 LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
487 LateInlineCallGenerator(method, inline_cg) {}
488
489 virtual JVMState* generate(JVMState* jvms) {
490 Compile *C = Compile::current();
491
492 C->log_inline_id(this);
493
494 C->add_string_late_inline(this);
495
496 JVMState* new_jvms = DirectCallGenerator::generate(jvms);
497 return new_jvms;
498 }
499
500 virtual bool is_string_late_inline() const { return true; }
501 };
502
503 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
504 return new LateInlineStringCallGenerator(method, inline_cg);
505 }
506
507 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
508
509 public:
510 LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
511 LateInlineCallGenerator(method, inline_cg) {}
512
513 virtual JVMState* generate(JVMState* jvms) {
514 Compile *C = Compile::current();
515
516 C->log_inline_id(this);
517
518 C->add_boxing_late_inline(this);
519
520 JVMState* new_jvms = DirectCallGenerator::generate(jvms);
521 return new_jvms;
522 }
523 };
524
525 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
526 return new LateInlineBoxingCallGenerator(method, inline_cg);
527 }
528
529 //---------------------------WarmCallGenerator--------------------------------
530 // Internal class which handles initial deferral of inlining decisions.
531 class WarmCallGenerator : public CallGenerator {
532 WarmCallInfo* _call_info;
533 CallGenerator* _if_cold;
534 CallGenerator* _if_hot;
535 bool _is_virtual; // caches virtuality of if_cold
536 bool _is_inline; // caches inline-ness of if_hot
537
538 public:
539 WarmCallGenerator(WarmCallInfo* ci,
540 CallGenerator* if_cold,
541 CallGenerator* if_hot)
542 : CallGenerator(if_cold->method())
543 {
544 assert(method() == if_hot->method(), "consistent choices");
545 _call_info = ci;
546 _if_cold = if_cold;
547 _if_hot = if_hot;
548 _is_virtual = if_cold->is_virtual();
549 _is_inline = if_hot->is_inline();
550 }
551
552 virtual bool is_inline() const { return _is_inline; }
553 virtual bool is_virtual() const { return _is_virtual; }
554 virtual bool is_deferred() const { return true; }
555
556 virtual JVMState* generate(JVMState* jvms);
557 };
558
559
560 CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
561 CallGenerator* if_cold,
562 CallGenerator* if_hot) {
563 return new WarmCallGenerator(ci, if_cold, if_hot);
564 }
565
566 JVMState* WarmCallGenerator::generate(JVMState* jvms) {
567 Compile* C = Compile::current();
568 C->print_inlining_update(this);
569
570 if (C->log() != NULL) {
571 C->log()->elem("warm_call bci='%d'", jvms->bci());
572 }
573 jvms = _if_cold->generate(jvms);
574 if (jvms != NULL) {
575 Node* m = jvms->map()->control();
576 if (m->is_CatchProj()) m = m->in(0); else m = C->top();
577 if (m->is_Catch()) m = m->in(0); else m = C->top();
578 if (m->is_Proj()) m = m->in(0); else m = C->top();
579 if (m->is_CallJava()) {
580 _call_info->set_call(m->as_Call());
581 _call_info->set_hot_cg(_if_hot);
582 #ifndef PRODUCT
583 if (PrintOpto || PrintOptoInlining) {
584 tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
585 tty->print("WCI: ");
586 _call_info->print();
587 }
588 #endif
589 _call_info->set_heat(_call_info->compute_heat());
590 C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
591 }
592 }
593 return jvms;
614 PredictedCallGenerator(ciKlass* predicted_receiver,
615 CallGenerator* if_missed,
616 CallGenerator* if_hit, float hit_prob)
617 : CallGenerator(if_missed->method())
618 {
619 // The call profile data may predict the hit_prob as extreme as 0 or 1.
620 // Remove the extremes values from the range.
621 if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
622 if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;
623
624 _predicted_receiver = predicted_receiver;
625 _if_missed = if_missed;
626 _if_hit = if_hit;
627 _hit_prob = hit_prob;
628 }
629
  // The site is compiled as a guarded virtual call, so report virtual.
  virtual bool is_virtual() const { return true; }
  // Inline-ness and deferral mirror the hot-path (type-hit) generator.
  virtual bool is_inline() const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
635 };
636
637
638 CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
639 CallGenerator* if_missed,
640 CallGenerator* if_hit,
641 float hit_prob) {
642 return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
643 }
644
645
646 JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
647 GraphKit kit(jvms);
648 kit.C->print_inlining_update(this);
649 PhaseGVN& gvn = kit.gvn();
650 // We need an explicit receiver null_check before checking its type.
651 // We share a map with the caller, so his JVMS gets adjusted.
652 Node* receiver = kit.argument(0);
653 CompileLog* log = kit.C->log();
654 if (log != NULL) {
655 log->elem("predicted_call bci='%d' klass='%d'",
656 jvms->bci(), log->identify(_predicted_receiver));
657 }
658
659 receiver = kit.null_check_receiver_before_call(method());
660 if (kit.stopped()) {
661 return kit.transfer_exceptions_into_jvms();
662 }
663
664 // Make a copy of the replaced nodes in case we need to restore them
665 ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
666 replaced_nodes.clone();
667
668 Node* exact_receiver = receiver; // will get updated in place...
669 Node* slow_ctl = kit.type_check_receiver(receiver,
670 _predicted_receiver, _hit_prob,
671 &exact_receiver);
672
673 SafePointNode* slow_map = NULL;
674 JVMState* slow_jvms;
675 { PreserveJVMState pjvms(&kit);
676 kit.set_control(slow_ctl);
677 if (!kit.stopped()) {
678 slow_jvms = _if_missed->generate(kit.sync_jvms());
679 if (kit.failing())
680 return NULL; // might happen because of NodeCountInliningCutoff
681 assert(slow_jvms != NULL, "must be");
682 kit.add_exception_states_from(slow_jvms);
683 kit.set_map(slow_jvms->map());
684 if (!kit.stopped())
685 slow_map = kit.stop();
686 }
687 }
688
689 if (kit.stopped()) {
690 // Instance exactly does not matches the desired type.
691 kit.set_jvms(slow_jvms);
692 return kit.transfer_exceptions_into_jvms();
693 }
694
695 // fall through if the instance exactly matches the desired type
696 kit.replace_in_map(receiver, exact_receiver);
697
698 // Make the hot call:
699 JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
700 if (new_jvms == NULL) {
701 // Inline failed, so make a direct call.
702 assert(_if_hit->is_inline(), "must have been a failed inline");
703 CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
704 new_jvms = cg->generate(kit.sync_jvms());
705 }
706 kit.add_exception_states_from(new_jvms);
707 kit.set_jvms(new_jvms);
708
709 // Need to merge slow and fast?
710 if (slow_map == NULL) {
711 // The fast path is the only path remaining.
712 return kit.transfer_exceptions_into_jvms();
713 }
714
715 if (kit.stopped()) {
716 // Inlined method threw an exception, so it's just the slow path after all.
717 kit.set_jvms(slow_jvms);
718 return kit.transfer_exceptions_into_jvms();
719 }
720
721 // There are 2 branches and the replaced nodes are only valid on
722 // one: restore the replaced nodes to what they were before the
723 // branch.
724 kit.map()->set_replaced_nodes(replaced_nodes);
725
726 // Finish the diamond.
727 kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
728 RegionNode* region = new RegionNode(3);
729 region->init_req(1, kit.control());
730 region->init_req(2, slow_map->control());
731 kit.set_control(gvn.transform(region));
732 Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
733 iophi->set_req(2, slow_map->i_o());
734 kit.set_i_o(gvn.transform(iophi));
735 kit.merge_memory(slow_map->merged_memory(), region, 2);
736 uint tos = kit.jvms()->stkoff() + kit.sp();
737 uint limit = slow_map->req();
738 for (uint i = TypeFunc::Parms; i < limit; i++) {
739 // Skip unused stack slots; fast forward to monoff();
740 if (i == tos) {
741 i = kit.jvms()->monoff();
742 if( i >= limit ) break;
743 }
744 Node* m = kit.map()->in(i);
745 Node* n = slow_map->in(i);
892
893 //------------------------PredictedIntrinsicGenerator------------------------------
894 // Internal class which handles all predicted Intrinsic calls.
895 class PredictedIntrinsicGenerator : public CallGenerator {
896 CallGenerator* _intrinsic;
897 CallGenerator* _cg;
898
899 public:
900 PredictedIntrinsicGenerator(CallGenerator* intrinsic,
901 CallGenerator* cg)
902 : CallGenerator(cg->method())
903 {
904 _intrinsic = intrinsic;
905 _cg = cg;
906 }
907
908 virtual bool is_virtual() const { return true; }
909 virtual bool is_inlined() const { return true; }
910 virtual bool is_intrinsic() const { return true; }
911
912 virtual JVMState* generate(JVMState* jvms);
913 };
914
915
916 CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
917 CallGenerator* cg) {
918 return new PredictedIntrinsicGenerator(intrinsic, cg);
919 }
920
921
922 JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
923 GraphKit kit(jvms);
924 PhaseGVN& gvn = kit.gvn();
925
926 CompileLog* log = kit.C->log();
927 if (log != NULL) {
928 log->elem("predicted_intrinsic bci='%d' method='%d'",
929 jvms->bci(), log->identify(method()));
930 }
931
932 Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
933 if (kit.failing())
934 return NULL; // might happen because of NodeCountInliningCutoff
935
936 kit.C->print_inlining_update(this);
937 SafePointNode* slow_map = NULL;
938 JVMState* slow_jvms;
939 if (slow_ctl != NULL) {
940 PreserveJVMState pjvms(&kit);
941 kit.set_control(slow_ctl);
942 if (!kit.stopped()) {
943 slow_jvms = _cg->generate(kit.sync_jvms());
944 if (kit.failing())
945 return NULL; // might happen because of NodeCountInliningCutoff
946 assert(slow_jvms != NULL, "must be");
947 kit.add_exception_states_from(slow_jvms);
948 kit.set_map(slow_jvms->map());
949 if (!kit.stopped())
950 slow_map = kit.stop();
951 }
952 }
953
954 if (kit.stopped()) {
955 // Predicate is always false.
956 kit.set_jvms(slow_jvms);
957 return kit.transfer_exceptions_into_jvms();
958 }
959
960 // Generate intrinsic code:
961 JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
962 if (new_jvms == NULL) {
963 // Intrinsic failed, so use slow code or make a direct call.
964 if (slow_map == NULL) {
965 CallGenerator* cg = CallGenerator::for_direct_call(method());
966 new_jvms = cg->generate(kit.sync_jvms());
967 } else {
968 kit.set_jvms(slow_jvms);
969 return kit.transfer_exceptions_into_jvms();
970 }
971 }
972 kit.add_exception_states_from(new_jvms);
973 kit.set_jvms(new_jvms);
974
975 // Need to merge slow and fast?
976 if (slow_map == NULL) {
977 // The fast path is the only path remaining.
978 return kit.transfer_exceptions_into_jvms();
979 }
980
981 if (kit.stopped()) {
982 // Intrinsic method threw an exception, so it's just the slow path after all.
983 kit.set_jvms(slow_jvms);
984 return kit.transfer_exceptions_into_jvms();
985 }
986
1016
1017 //-------------------------UncommonTrapCallGenerator-----------------------------
1018 // Internal class which handles all out-of-line calls checking receiver type.
1019 class UncommonTrapCallGenerator : public CallGenerator {
1020 Deoptimization::DeoptReason _reason;
1021 Deoptimization::DeoptAction _action;
1022
1023 public:
1024 UncommonTrapCallGenerator(ciMethod* m,
1025 Deoptimization::DeoptReason reason,
1026 Deoptimization::DeoptAction action)
1027 : CallGenerator(m)
1028 {
1029 _reason = reason;
1030 _action = action;
1031 }
1032
1033 virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
1034 virtual bool is_trap() const { return true; }
1035
1036 virtual JVMState* generate(JVMState* jvms);
1037 };
1038
1039
1040 CallGenerator*
1041 CallGenerator::for_uncommon_trap(ciMethod* m,
1042 Deoptimization::DeoptReason reason,
1043 Deoptimization::DeoptAction action) {
1044 return new UncommonTrapCallGenerator(m, reason, action);
1045 }
1046
1047
1048 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1049 GraphKit kit(jvms);
1050 kit.C->print_inlining_update(this);
1051 // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
1052 int nargs = method()->arg_size();
1053 kit.inc_sp(nargs);
1054 assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1055 if (_reason == Deoptimization::Reason_class_check &&
1056 _action == Deoptimization::Action_maybe_recompile) {
1057 // Temp fix for 6529811
1058 // Don't allow uncommon_trap to override our decision to recompile in the event
1059 // of a class cast failure for a monomorphic call as it will never let us convert
1060 // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
1061 bool keep_exact_action = true;
1062 kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
1063 } else {
1064 kit.uncommon_trap(_reason, _action);
1065 }
1066 return kit.transfer_exceptions_into_jvms();
1067 }
1068
|