src/share/vm/opto/callGenerator.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Cdiff src/share/vm/opto/callGenerator.cpp

src/share/vm/opto/callGenerator.cpp

Print this page
rev 5411 : 8024069: replace_in_map() should operate on parent maps
Summary: type information gets lost because replace_in_map() doesn't update parent maps
Reviewed-by:
rev 5413 : [mq]: replaceinmapparents-cleanup

*** 61,76 **** _expected_uses = expected_uses; assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible"); } virtual bool is_parse() const { return true; } ! virtual JVMState* generate(JVMState* jvms); int is_osr() { return _is_osr; } }; ! JVMState* ParseGenerator::generate(JVMState* jvms) { Compile* C = Compile::current(); if (is_osr()) { // The JVMS for an OSR has a single argument (see its TypeFunc). assert(jvms->depth() == 1, "no inline OSR"); --- 61,76 ---- _expected_uses = expected_uses; assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible"); } virtual bool is_parse() const { return true; } ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); int is_osr() { return _is_osr; } }; ! JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) { Compile* C = Compile::current(); if (is_osr()) { // The JVMS for an OSR has a single argument (see its TypeFunc). assert(jvms->depth() == 1, "no inline OSR");
*** 78,88 **** if (C->failing()) { return NULL; // bailing out of the compile; do not try to parse } ! Parse parser(jvms, method(), _expected_uses); // Grab signature for matching/allocation #ifdef ASSERT if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) { MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag); assert(C->env()->system_dictionary_modification_counter_changed(), --- 78,88 ---- if (C->failing()) { return NULL; // bailing out of the compile; do not try to parse } ! Parse parser(jvms, method(), _expected_uses, parent_parser); // Grab signature for matching/allocation #ifdef ASSERT if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) { MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag); assert(C->env()->system_dictionary_modification_counter_changed(),
*** 117,132 **** DirectCallGenerator(ciMethod* method, bool separate_io_proj) : CallGenerator(method), _separate_io_proj(separate_io_proj) { } ! virtual JVMState* generate(JVMState* jvms); CallStaticJavaNode* call_node() const { return _call_node; } }; ! JVMState* DirectCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); bool is_static = method()->is_static(); address target = is_static ? SharedRuntime::get_resolve_static_call_stub() : SharedRuntime::get_resolve_opt_virtual_call_stub(); --- 117,132 ---- DirectCallGenerator(ciMethod* method, bool separate_io_proj) : CallGenerator(method), _separate_io_proj(separate_io_proj) { } ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); CallStaticJavaNode* call_node() const { return _call_node; } }; ! JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); bool is_static = method()->is_static(); address target = is_static ? SharedRuntime::get_resolve_static_call_stub() : SharedRuntime::get_resolve_opt_virtual_call_stub();
*** 169,182 **** { assert(vtable_index == Method::invalid_vtable_index || vtable_index >= 0, "either invalid or usable"); } virtual bool is_virtual() const { return true; } ! virtual JVMState* generate(JVMState* jvms); }; ! JVMState* VirtualCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); Node* receiver = kit.argument(0); if (kit.C->log() != NULL) { kit.C->log()->elem("virtual_call bci='%d'", jvms->bci()); --- 169,182 ---- { assert(vtable_index == Method::invalid_vtable_index || vtable_index >= 0, "either invalid or usable"); } virtual bool is_virtual() const { return true; } ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; ! JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); Node* receiver = kit.argument(0); if (kit.C->log() != NULL) { kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
*** 274,284 **** virtual bool is_late_inline() const { return true; } // Convert the CallStaticJava into an inline virtual void do_late_inline(); ! virtual JVMState* generate(JVMState* jvms) { Compile *C = Compile::current(); C->print_inlining_skip(this); // Record that this call site should be revisited once the main // parse is finished. --- 274,284 ---- virtual bool is_late_inline() const { return true; } // Convert the CallStaticJava into an inline virtual void do_late_inline(); ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { Compile *C = Compile::current(); C->print_inlining_skip(this); // Record that this call site should be revisited once the main // parse is finished.
*** 288,298 **** // Emit the CallStaticJava and request separate projections so // that the late inlining logic can distinguish between fall // through and exceptional uses of the memory and io projections // as is done for allocations and macro expansion. ! return DirectCallGenerator::generate(jvms); } virtual void print_inlining_late(const char* msg) { CallNode* call = call_node(); Compile* C = Compile::current(); --- 288,298 ---- // Emit the CallStaticJava and request separate projections so // that the late inlining logic can distinguish between fall // through and exceptional uses of the memory and io projections // as is done for allocations and macro expansion. ! return DirectCallGenerator::generate(jvms, parent_parser); } virtual void print_inlining_late(const char* msg) { CallNode* call = call_node(); Compile* C = Compile::current();
*** 387,397 **** entry_nn->set_jvms(jvms); C->set_default_node_notes(entry_nn); } // Now perform the inlining using the synthesized JVMState ! JVMState* new_jvms = _inline_cg->generate(jvms); if (new_jvms == NULL) return; // no change if (C->failing()) return; // Capture any exceptional control flow GraphKit kit(new_jvms); --- 387,397 ---- entry_nn->set_jvms(jvms); C->set_default_node_notes(entry_nn); } // Now perform the inlining using the synthesized JVMState ! JVMState* new_jvms = _inline_cg->generate(jvms, NULL); if (new_jvms == NULL) return; // no change if (C->failing()) return; // Capture any exceptional control flow GraphKit kit(new_jvms);
*** 427,438 **** LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) : LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {} virtual bool is_mh_late_inline() const { return true; } ! virtual JVMState* generate(JVMState* jvms) { ! JVMState* new_jvms = LateInlineCallGenerator::generate(jvms); if (_input_not_const) { // inlining won't be possible so no need to enqueue right now. call_node()->set_generator(this); } else { Compile::current()->add_late_inline(this); --- 427,438 ---- LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) : LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {} virtual bool is_mh_late_inline() const { return true; } ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { ! JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser); if (_input_not_const) { // inlining won't be possible so no need to enqueue right now. call_node()->set_generator(this); } else { Compile::current()->add_late_inline(this);
*** 475,491 **** public: LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} ! virtual JVMState* generate(JVMState* jvms) { Compile *C = Compile::current(); C->print_inlining_skip(this); C->add_string_late_inline(this); ! JVMState* new_jvms = DirectCallGenerator::generate(jvms); return new_jvms; } }; CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) { --- 475,491 ---- public: LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { Compile *C = Compile::current(); C->print_inlining_skip(this); C->add_string_late_inline(this); ! JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); return new_jvms; } }; CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
*** 496,512 **** public: LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} ! virtual JVMState* generate(JVMState* jvms) { Compile *C = Compile::current(); C->print_inlining_skip(this); C->add_boxing_late_inline(this); ! JVMState* new_jvms = DirectCallGenerator::generate(jvms); return new_jvms; } }; CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) { --- 496,512 ---- public: LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { Compile *C = Compile::current(); C->print_inlining_skip(this); C->add_boxing_late_inline(this); ! JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); return new_jvms; } }; CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
*** 538,563 **** virtual bool is_inline() const { return _is_inline; } virtual bool is_virtual() const { return _is_virtual; } virtual bool is_deferred() const { return true; } ! virtual JVMState* generate(JVMState* jvms); }; CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci, CallGenerator* if_cold, CallGenerator* if_hot) { return new WarmCallGenerator(ci, if_cold, if_hot); } ! JVMState* WarmCallGenerator::generate(JVMState* jvms) { Compile* C = Compile::current(); if (C->log() != NULL) { C->log()->elem("warm_call bci='%d'", jvms->bci()); } ! jvms = _if_cold->generate(jvms); if (jvms != NULL) { Node* m = jvms->map()->control(); if (m->is_CatchProj()) m = m->in(0); else m = C->top(); if (m->is_Catch()) m = m->in(0); else m = C->top(); if (m->is_Proj()) m = m->in(0); else m = C->top(); --- 538,563 ---- virtual bool is_inline() const { return _is_inline; } virtual bool is_virtual() const { return _is_virtual; } virtual bool is_deferred() const { return true; } ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci, CallGenerator* if_cold, CallGenerator* if_hot) { return new WarmCallGenerator(ci, if_cold, if_hot); } ! JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { Compile* C = Compile::current(); if (C->log() != NULL) { C->log()->elem("warm_call bci='%d'", jvms->bci()); } ! jvms = _if_cold->generate(jvms, parent_parser); if (jvms != NULL) { Node* m = jvms->map()->control(); if (m->is_CatchProj()) m = m->in(0); else m = C->top(); if (m->is_Catch()) m = m->in(0); else m = C->top(); if (m->is_Proj()) m = m->in(0); else m = C->top();
*** 614,624 **** virtual bool is_virtual() const { return true; } virtual bool is_inline() const { return _if_hit->is_inline(); } virtual bool is_deferred() const { return _if_hit->is_deferred(); } ! virtual JVMState* generate(JVMState* jvms); }; CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver, CallGenerator* if_missed, --- 614,624 ---- virtual bool is_virtual() const { return true; } virtual bool is_inline() const { return _if_hit->is_inline(); } virtual bool is_deferred() const { return _if_hit->is_deferred(); } ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver, CallGenerator* if_missed,
*** 626,636 **** float hit_prob) { return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob); } ! JVMState* PredictedCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); // We need an explicit receiver null_check before checking its type. // We share a map with the caller, so his JVMS gets adjusted. Node* receiver = kit.argument(0); --- 626,636 ---- float hit_prob) { return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob); } ! JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); // We need an explicit receiver null_check before checking its type. // We share a map with the caller, so his JVMS gets adjusted. Node* receiver = kit.argument(0);
*** 654,664 **** SafePointNode* slow_map = NULL; JVMState* slow_jvms; { PreserveJVMState pjvms(&kit); kit.set_control(slow_ctl); if (!kit.stopped()) { ! slow_jvms = _if_missed->generate(kit.sync_jvms()); if (kit.failing()) return NULL; // might happen because of NodeCountInliningCutoff assert(slow_jvms != NULL, "must be"); kit.add_exception_states_from(slow_jvms); kit.set_map(slow_jvms->map()); --- 654,664 ---- SafePointNode* slow_map = NULL; JVMState* slow_jvms; { PreserveJVMState pjvms(&kit); kit.set_control(slow_ctl); if (!kit.stopped()) { ! slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser); if (kit.failing()) return NULL; // might happen because of NodeCountInliningCutoff assert(slow_jvms != NULL, "must be"); kit.add_exception_states_from(slow_jvms); kit.set_map(slow_jvms->map());
*** 675,690 **** // fall through if the instance exactly matches the desired type kit.replace_in_map(receiver, exact_receiver); // Make the hot call: ! JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); if (new_jvms == NULL) { // Inline failed, so make a direct call. assert(_if_hit->is_inline(), "must have been a failed inline"); CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); ! new_jvms = cg->generate(kit.sync_jvms()); } kit.add_exception_states_from(new_jvms); kit.set_jvms(new_jvms); // Need to merge slow and fast? --- 675,690 ---- // fall through if the instance exactly matches the desired type kit.replace_in_map(receiver, exact_receiver); // Make the hot call: ! JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser); if (new_jvms == NULL) { // Inline failed, so make a direct call. assert(_if_hit->is_inline(), "must have been a failed inline"); CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); ! new_jvms = cg->generate(kit.sync_jvms(), parent_parser); } kit.add_exception_states_from(new_jvms); kit.set_jvms(new_jvms); // Need to merge slow and fast?
*** 872,892 **** virtual bool is_virtual() const { return true; } virtual bool is_inlined() const { return true; } virtual bool is_intrinsic() const { return true; } ! virtual JVMState* generate(JVMState* jvms); }; CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic, CallGenerator* cg) { return new PredictedIntrinsicGenerator(intrinsic, cg); } ! JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); CompileLog* log = kit.C->log(); if (log != NULL) { --- 872,892 ---- virtual bool is_virtual() const { return true; } virtual bool is_inlined() const { return true; } virtual bool is_intrinsic() const { return true; } ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic, CallGenerator* cg) { return new PredictedIntrinsicGenerator(intrinsic, cg); } ! JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); CompileLog* log = kit.C->log(); if (log != NULL) {
*** 902,912 **** JVMState* slow_jvms; if (slow_ctl != NULL) { PreserveJVMState pjvms(&kit); kit.set_control(slow_ctl); if (!kit.stopped()) { ! slow_jvms = _cg->generate(kit.sync_jvms()); if (kit.failing()) return NULL; // might happen because of NodeCountInliningCutoff assert(slow_jvms != NULL, "must be"); kit.add_exception_states_from(slow_jvms); kit.set_map(slow_jvms->map()); --- 902,912 ---- JVMState* slow_jvms; if (slow_ctl != NULL) { PreserveJVMState pjvms(&kit); kit.set_control(slow_ctl); if (!kit.stopped()) { ! slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser); if (kit.failing()) return NULL; // might happen because of NodeCountInliningCutoff assert(slow_jvms != NULL, "must be"); kit.add_exception_states_from(slow_jvms); kit.set_map(slow_jvms->map());
*** 920,935 **** kit.set_jvms(slow_jvms); return kit.transfer_exceptions_into_jvms(); } // Generate intrinsic code: ! JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms()); if (new_jvms == NULL) { // Intrinsic failed, so use slow code or make a direct call. if (slow_map == NULL) { CallGenerator* cg = CallGenerator::for_direct_call(method()); ! new_jvms = cg->generate(kit.sync_jvms()); } else { kit.set_jvms(slow_jvms); return kit.transfer_exceptions_into_jvms(); } } --- 920,935 ---- kit.set_jvms(slow_jvms); return kit.transfer_exceptions_into_jvms(); } // Generate intrinsic code: ! JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser); if (new_jvms == NULL) { // Intrinsic failed, so use slow code or make a direct call. if (slow_map == NULL) { CallGenerator* cg = CallGenerator::for_direct_call(method()); ! new_jvms = cg->generate(kit.sync_jvms(), parent_parser); } else { kit.set_jvms(slow_jvms); return kit.transfer_exceptions_into_jvms(); } }
*** 995,1005 **** } virtual bool is_virtual() const { ShouldNotReachHere(); return false; } virtual bool is_trap() const { return true; } ! virtual JVMState* generate(JVMState* jvms); }; CallGenerator* CallGenerator::for_uncommon_trap(ciMethod* m, --- 995,1005 ---- } virtual bool is_virtual() const { ShouldNotReachHere(); return false; } virtual bool is_trap() const { return true; } ! virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); }; CallGenerator* CallGenerator::for_uncommon_trap(ciMethod* m,
*** 1007,1017 **** Deoptimization::DeoptAction action) { return new UncommonTrapCallGenerator(m, reason, action); } ! JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver). int nargs = method()->arg_size(); kit.inc_sp(nargs); assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed"); --- 1007,1017 ---- Deoptimization::DeoptAction action) { return new UncommonTrapCallGenerator(m, reason, action); } ! JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { GraphKit kit(jvms); // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver). int nargs = method()->arg_size(); kit.inc_sp(nargs); assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
src/share/vm/opto/callGenerator.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File