/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || PrintInlining || PrintOptoInlining) {
    tty->print("   ");
    for( int i = 0; i < depth; i++ ) tty->print("  ");
    if (!PrintOpto) {
      method->print_short_name();
      tty->print(" ->");
    }
    tty->print(" @ %d  ", bci);
    prof_method->print_short_name();
    tty->print("  >>TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    prof_klass->name()->print_symbol();
    tty->print_cr(" (%d bytes)", prof_method->code_size());
  }
}
#endif

CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor) {
  CallGenerator* cg;

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = jvms->method()->call_profile_at_bci(jvms->bci());

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(call_method), site_count, prof_factor);
    if (call_is_virtual)  log->print(" virtual='1'");
    if (allow_inline)     log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  if (allow_inline) {
    cg = find_intrinsic(call_method, call_is_virtual);
    if (cg != NULL)  return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  bool caller_method_is_strict = jvms->method()->is_strict();
  if( caller_method_is_strict ^ call_method->is_strict() ) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // so scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_is_virtual) {
      InlineTree* ilt;
      if (UseOldInlining) {
        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      } else {
        // Make a disembodied, stateless ILT.
        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
        float site_invoke_ratio = prof_factor;
        // Note:  ilt is for the root of this parse, not the present call site.
        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, 0);
      }
      WarmCallInfo scratch_ci;
      if (!UseOldInlining)
        scratch_ci.init(jvms, call_method, profile, prof_factor);
      WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci);
      assert(ci != &scratch_ci, "do not let this pointer escape");
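      // Note: this local allow_inline shadows the parameter above; it and
      // require_inline reflect the WarmCallInfo's verdict and apply only to
      // this bytecoded-inline attempt.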
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
        if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          return CallGenerator::for_late_inline(call_method, cg);
        }
        if (cg == NULL) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_is_virtual && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;
      if (have_major_receiver || profile.morphism() == 1 ||
          (profile.morphism() == 2 && UseBimorphicInlining)) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                      profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (profile.morphism() == 2 && UseBimorphicInlining) {
            next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                               profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_is_virtual, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                  // Skip if we can't inline second receiver's method
                  next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
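          // A separate deopt reason is used for bimorphic sites so their trap
          // counts (checked via too_many_traps below) are tracked apart from
          // ordinary class-check failures.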
          Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
                                    Deoptimization::Reason_bimorphic :
                                    Deoptimization::Reason_class_check;
          if (( profile.morphism() == 1 ||
               (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
              !too_many_traps(jvms->method(), jvms->bci(), reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(call_method, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count));
              cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // Do MethodHandle calls.
  if (call_method->is_method_handle_invoke()) {
    if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) {
      GraphKit kit(jvms);
      Node* n = kit.argument(0);

      if (n->Opcode() == Op_ConP) {
        const TypeOopPtr* oop_ptr = n->bottom_type()->is_oopptr();
        ciObject* const_oop = oop_ptr->const_oop();
        ciMethodHandle* method_handle = const_oop->as_method_handle();

        // Set the actually called method to have access to the class
        // and signature in the MethodHandleCompiler.
        method_handle->set_callee(call_method);

        // Get an adapter for the MethodHandle.
        ciMethod* target_method = method_handle->get_method_handle_adapter();

        CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
        if (hit_cg != NULL && hit_cg->is_inline())
          return hit_cg;
      }

      return CallGenerator::for_direct_call(call_method);
    }
    else {
      // Get the MethodHandle from the CallSite.
      ciMethod* caller_method = jvms->method();
      ciBytecodeStream str(caller_method);
      str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
      ciCallSite*     call_site     = str.get_call_site();
      ciMethodHandle* method_handle = call_site->get_target();

      // Set the actually called method to have access to the class
      // and signature in the MethodHandleCompiler.
      method_handle->set_callee(call_method);

      // Get an adapter for the MethodHandle.
      ciMethod* target_method = method_handle->get_invokedynamic_adapter();

      CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
      if (hit_cg != NULL && hit_cg->is_inline()) {
        CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
        return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
      }

      // If something failed, generate a normal dynamic call.
      return CallGenerator::for_dynamic_call(call_method);
    }
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_is_virtual) {
    return CallGenerator::for_virtual_call(call_method, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

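    // These constructor, append and toString intrinsics are the calls the
    // string-concatenation optimization looks for; keeping them out-of-line
    // (late-inlined) preserves the pattern for that pass.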
    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}


// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
  return false;
}


//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
  bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;

  // Find target being called
  bool             will_link;
  ciMethod*        dest_method   = iter().get_method(will_link);
  ciInstanceKlass* holder_klass  = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int nargs = dest_method->arg_size();
  if (is_invokedynamic)  nargs -= 1;

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      dest_method->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  assert((dest_method->is_static() || is_invokedynamic) == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node*             receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
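    // isa_oopptr() yields NULL when the receiver's type is not a known oop
    // pointer; optimize_inlining tolerates a NULL receiver_type.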
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);

    // Has the call been sufficiently improved such that it is no longer virtual?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(false/*at_method_entry*/, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms;
  if ((new_jvms = cg->generate(jvms)) == NULL) {
    // When an inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
#ifndef PRODUCT
    if (PrintOpto || PrintOptoInlining || PrintInlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        tty->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(); tty->cr();
      }
    }
#endif
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;  // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate:  calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || call_method->has_loops());
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        dest_method->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(dest_method->return_type()));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h        = handlers.handler();
    int                 h_bci    = h->handler_bci();
    ciInstanceKlass*    h_klass  = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type*         h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
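    // An exception oop is never null, so narrow the handler type accordingly.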
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
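  // One projection per handler plus one for the fall-through (normal return)
  // path, which is attached after the loop below.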
  CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C, 2) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if (extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci);
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {     // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses or that are not loaded at
// compile-time.  We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );

    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if( ex_node->is_Phi() ) {
      ex_klass_node = new (C, ex_node->req()) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
      for( uint i = 1; i < ex_node->req(); i++ ) {
        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (;!handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                   // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {  // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
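    // gen_subtype_check leaves control() set to the path where ex_klass_node
    // is a subtype of the handler's klass and returns the not-subtype path,
    // which we resume below to test the remaining handlers.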
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O.

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypeInstPtr* addr_type = TypeInstPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


// Identify possible target method and inlining style
ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                   ciMethod *dest_method, const TypeOopPtr* receiver_type) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (dest_method->can_be_statically_bound()) {
    return dest_method;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        dest_method->holder() == env()->Object_klass()) {
      return dest_method;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is a same or better type than the original actual_receiver,
      // e.g. static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass*   calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                       C->log()->identify(klass),
                       C->log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}