src/share/vm/opto/callGenerator.cpp

rev 6110 : 8007988: PrintInlining output is inconsistent with incremental inlining
Summary: fix duplicate and conflicting inlining output
Reviewed-by:
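
Note: the change replaces the old print_inlining_skip()/print_inlining_insert() pair with buffer-style hooks (print_inlining_update, print_inlining_assert_ready, print_inlining_move_to, print_inlining_update_delayed) so a call site whose inlining decision is deferred keeps a single slot in the PrintInlining output instead of producing a duplicate or conflicting line later. The sketch below is a standalone illustration of that idea only, not HotSpot code: InliningLog, InlineMsg, update() and update_delayed() are invented names for this example; only the overall pattern of overwriting a call site's earlier message mirrors the hooks added in this change.

// Simplified, self-contained model of buffered PrintInlining output.
#include <cstdio>
#include <string>
#include <vector>

struct InlineMsg {
  const void* cg;     // call generator that owns this slot
  std::string text;   // current message for that call site
};

class InliningLog {
  std::vector<InlineMsg> _buf;   // one slot per call site, in output order
 public:
  // Record (or overwrite) the message for a call generator.
  void update(const void* cg, const std::string& text) {
    for (InlineMsg& m : _buf) {
      if (m.cg == cg) { m.text = text; return; }   // overwrite, don't append
    }
    _buf.push_back(InlineMsg{cg, text});
  }
  // Late inlining: replace the placeholder left at parse time with the
  // final decision, keeping the original position in the output.
  void update_delayed(const void* cg, const std::string& text) {
    update(cg, text);
  }
  void print_all() const {
    for (const InlineMsg& m : _buf) std::printf("%s\n", m.text.c_str());
  }
};

int main() {
  InliningLog log;
  int site_a, site_b;   // stand-ins for call generators (addresses only)
  log.update(&site_a, "@ 5  Foo::bar  late inline attempted");
  log.update(&site_b, "@ 9  Foo::baz  inline (hot)");
  // Incremental inlining later revisits site_a; the earlier placeholder is
  // replaced instead of a second, conflicting line being emitted.
  log.update_delayed(&site_a, "@ 5  Foo::bar  inline (late)");
  log.print_all();
  return 0;
}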

*** 68,77 ****
--- 68,78 ----
  };

  JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
    Compile* C = Compile::current();
+   C->print_inlining_update(this);

    if (is_osr()) {
      // The JVMS for a OSR has a single argument (see its TypeFunc).
      assert(jvms->depth() == 1, "no inline OSR");
    }
*** 124,133 ****
--- 125,135 ----
    CallStaticJavaNode* call_node() const { return _call_node; }
  };

  JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
    GraphKit kit(jvms);
+   kit.C->print_inlining_update(this);
    bool is_static = method()->is_static();
    address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                               : SharedRuntime::get_resolve_opt_virtual_call_stub();

    if (kit.C->log() != NULL) {
*** 176,185 ****
--- 178,189 ----
  JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
    GraphKit kit(jvms);
    Node* receiver = kit.argument(0);

+   kit.C->print_inlining_update(this);
+
    if (kit.C->log() != NULL) {
      kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
    }

    // If the receiver is a constant null, do not torture the system
*** 276,286 ****
    // Convert the CallStaticJava into an inline
    virtual void do_late_inline();

    virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
      Compile *C = Compile::current();
-     C->print_inlining_skip(this);

      // Record that this call site should be revisited once the main
      // parse is finished.
      if (!is_mh_late_inline()) {
        C->add_late_inline(this);
--- 280,289 ----
*** 294,307 ****
    }

    virtual void print_inlining_late(const char* msg) {
      CallNode* call = call_node();
      Compile* C = Compile::current();
!     C->print_inlining_insert(this);
      C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    }
-
  };

  void LateInlineCallGenerator::do_late_inline() {
    // Can't inline it
    CallStaticJavaNode* call = call_node();
--- 297,311 ----
    }

    virtual void print_inlining_late(const char* msg) {
      CallNode* call = call_node();
      Compile* C = Compile::current();
!     C->print_inlining_assert_ready();
      C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
+     C->print_inlining_move_to(this);
+     C->print_inlining_update_delayed(this);
    }
  };

  void LateInlineCallGenerator::do_late_inline() {
    // Can't inline it
    CallStaticJavaNode* call = call_node();
*** 358,376 ****
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    // This check is done here because for_method_handle_inline() method
    // needs jvms for inlined state.
    if (!do_late_inline_check(jvms)) {
      map->disconnect_inputs(NULL, C);
      return;
    }

-   C->print_inlining_insert(this);
-
    CompileLog* log = C->log();
    if (log != NULL) {
      log->head("late_inline method='%d'", log->identify(method()));
      JVMState* p = jvms;
      while (p != NULL) {
--- 362,382 ----
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

+   C->print_inlining_assert_ready();
+
+   C->print_inlining_move_to(this);
+
    // This check is done here because for_method_handle_inline() method
    // needs jvms for inlined state.
    if (!do_late_inline_check(jvms)) {
      map->disconnect_inputs(NULL, C);
      return;
    }

    CompileLog* log = C->log();
    if (log != NULL) {
      log->head("late_inline method='%d'", log->identify(method()));
      JVMState* p = jvms;
      while (p != NULL) {
*** 386,396 ****
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

!   // Now perform the inling using the synthesized JVMState
    JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
    if (new_jvms == NULL) return; // no change
    if (C->failing()) return;

    // Capture any exceptional control flow
--- 392,402 ----
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

!   // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
    if (new_jvms == NULL) return; // no change
    if (C->failing()) return;

    // Capture any exceptional control flow
*** 429,457 ****
    virtual bool is_mh_late_inline() const { return true; }

    virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
      JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
      if (_input_not_const) {
        // inlining won't be possible so no need to enqueue right now.
        call_node()->set_generator(this);
      } else {
        Compile::current()->add_late_inline(this);
      }
      return new_jvms;
    }
-
-   virtual void print_inlining_late(const char* msg) {
-     if (!_input_not_const) return;
-     LateInlineCallGenerator::print_inlining_late(msg);
-   }
  };

  bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

    CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

    if (!_input_not_const) {
      _attempt++;
    }

    if (cg != NULL) {
--- 435,461 ----
    virtual bool is_mh_late_inline() const { return true; }

    virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
      JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
+
      if (_input_not_const) {
        // inlining won't be possible so no need to enqueue right now.
        call_node()->set_generator(this);
      } else {
        Compile::current()->add_late_inline(this);
      }
      return new_jvms;
    }
  };

  bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

    CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

+   Compile::current()->print_inlining_update_delayed(this);
+
    if (!_input_not_const) {
      _attempt++;
    }

    if (cg != NULL) {
*** 477,488 ****
    LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
      LateInlineCallGenerator(method, inline_cg) {}

    virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
      Compile *C = Compile::current();
-     C->print_inlining_skip(this);
-
      C->add_string_late_inline(this);

      JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
      return new_jvms;
    }
--- 481,490 ----
*** 500,510 ****
    LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
      LateInlineCallGenerator(method, inline_cg) {}

    virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
      Compile *C = Compile::current();
-     C->print_inlining_skip(this);

      C->add_boxing_late_inline(this);

      JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
      return new_jvms;
--- 502,511 ----
*** 552,561 ****
--- 553,564 ----
    return new WarmCallGenerator(ci, if_cold, if_hot);
  }

  JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
    Compile* C = Compile::current();
+   C->print_inlining_update(this);
+
    if (C->log() != NULL) {
      C->log()->elem("warm_call bci='%d'", jvms->bci());
    }
    jvms = _if_cold->generate(jvms, parent_parser);
    if (jvms != NULL) {
*** 630,639 ****
--- 633,643 ----
  }

  JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
    GraphKit kit(jvms);
+   kit.C->print_inlining_update(this);
    PhaseGVN& gvn = kit.gvn();

    // We need an explicit receiver null_check before checking its type.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.argument(0);
*** 777,786 ****
--- 781,793 ----
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
+     } else {
+       const char* msg = "receiver not constant";
+       if (PrintInlining) C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
*** 842,856 ****
                                              call_does_dispatch, vtable_index); // out-parameters
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = receiver_type->speculative_type();
        }
-
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  default:
--- 849,865 ----
                                              call_does_dispatch, vtable_index); // out-parameters
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = receiver_type->speculative_type();
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
+     } else {
+       const char* msg = "member_name not constant";
+       if (PrintInlining) C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
      }
    }
    break;

  default:
*** 902,911 ****
--- 911,921 ----
    Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff

+   kit.C->print_inlining_update(this);
    SafePointNode* slow_map = NULL;
    JVMState* slow_jvms;
    if (slow_ctl != NULL) {
      PreserveJVMState pjvms(&kit);
      kit.set_control(slow_ctl);
*** 1015,1024 ****
--- 1025,1035 ----
  }

  JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
    GraphKit kit(jvms);
+   kit.C->print_inlining_update(this);
    // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
    int nargs = method()->arg_size();
    kit.inc_sp(nargs);
    assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
    if (_reason == Deoptimization::Reason_class_check &&