/*
 * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_doCall.cpp.incl"

#ifndef PRODUCT
void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || PrintInlining || PrintOptoInlining) {
    tty->print("   ");
    for( int i = 0; i < depth; i++ ) tty->print("  ");
    if (!PrintOpto) {
      method->print_short_name();
      tty->print(" ->");
    }
    tty->print(" @ %d  ", bci);
    prof_method->print_short_name();
    tty->print("  >>TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    prof_klass->name()->print_symbol();
    tty->print_cr(" (%d bytes)", prof_method->code_size());
  }
}
#endif

CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float prof_factor) {
  CallGenerator* cg;

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = jvms->method()->call_profile_at_bci(jvms->bci());

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (profile.morphism() == 2)? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(call_method), site_count, prof_factor);
    if (call_is_virtual)  log->print(" virtual='1'");
    if (allow_inline)     log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  if (allow_inline) {
    cg = find_intrinsic(call_method, call_is_virtual);
    if (cg != NULL)  return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  bool caller_method_is_strict = jvms->method()->is_strict();
  if( caller_method_is_strict ^ call_method->is_strict() ) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_is_virtual) {
      InlineTree* ilt;
      if (UseOldInlining) {
        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      } else {
        // Make a disembodied, stateless ILT.
        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
        float site_invoke_ratio = prof_factor;
        // Note:  ilt is for the root of this parse, not the present call site.
        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio);
      }
      WarmCallInfo scratch_ci;
      if (!UseOldInlining)
        scratch_ci.init(jvms, call_method, profile, prof_factor);
      WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci);
      assert(ci != &scratch_ci, "do not let this pointer escape");
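      // NB:  This 'allow_inline' shadows the parameter of the same name above;
      // it and 'require_inline' govern only this bytecoded-inline attempt.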
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
        if (cg == NULL) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_is_virtual && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;
      if (have_major_receiver || profile.morphism() == 1 ||
          (profile.morphism() == 2 && UseBimorphicInlining)) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                      profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (profile.morphism() == 2 && UseBimorphicInlining) {
            next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                               profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_is_virtual, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                  // Skip if we can't inline the second receiver's method.
                  next_hit_cg = NULL;
              }
            }
          }
          // Check only the total number of traps per method, to allow
          // the transition from the monomorphic to the bimorphic case between
          // compilations without falling back to a virtual call.
          // A monomorphic case may have the class_check trap flag set
          // due to the time gap between uncommon trap processing (when the
          // flags are set in the MDO) and call site bytecode execution in
          // the interpreter (when the MDO counters are updated).
          // There was also a class_check trap in the monomorphic case due to
          // bug 6225440.
          CallGenerator* miss_cg;
          if (( profile.morphism() == 1 ||
               (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
              !too_many_traps(Deoptimization::Reason_class_check)) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(call_method,
                        Deoptimization::Reason_class_check,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
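              // PROB_MAX:  on this path the major receiver test has already
              // failed, so under the bimorphic profile the minor receiver is
              // expected to match essentially always.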
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count));
              cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_is_virtual) {
    return CallGenerator::for_virtual_call(call_method, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(call_method);
  }
}


// Uncommon-trap call sites where the callee is unloaded, uninitialized, or will not link.
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass  = dest_method->holder();
  if (!holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }
  if (dest_method->is_method_handle_invoke()
      && holder_klass->name() == ciSymbol::java_dyn_InvokeDynamic()) {
    // FIXME: NYI
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none,
                  holder_klass);
    return true;
  }

  assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
  return false;
}


//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a method that eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;

  // Find target being called
  bool             will_link;
  ciMethod*        dest_method   = iter().get_method(will_link);
  ciInstanceKlass* holder_klass  = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int   nargs    = dest_method->arg_size();

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      dest_method->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  assert(dest_method->is_static() == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node*             receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);

    // Has the call been sufficiently improved such that it is no longer virtual?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
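  // Note:  call_generator() always returns a generator; when no special
  // tactic applies it falls back to a plain virtual or direct call
  // (see the tail of Compile::call_generator above).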

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(false/*at_method_entry*/, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // Save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms;
  if ((new_jvms = cg->generate(jvms)) == NULL) {
    // When an inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
#ifndef PRODUCT
    if (PrintOpto || PrintOptoInlining || PrintInlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        tty->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(); tty->cr();
      }
    }
#endif
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;  // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate:  calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || call_method->has_loops());
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
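  // Exceptional paths produced by the call (or the inlined body) were
  // collected in new_jvms; register them with this parser for later handling.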
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        dest_method->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(dest_method->return_type()));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h        = handlers.handler();
    int                 h_bci    = h->handler_bci();
    ciInstanceKlass*    h_klass  = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type*         h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
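  // len+1 projections:  slot 0 (CatchProjNode::fall_through_index) carries
  // the normal return path; slots 1 through len feed the handlers below.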
  Node *catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1,handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C, 2) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if (extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci);
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {     // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: we have some handlers whose klasses have subklasses or are not
// loaded at compile-time.  We have to call the runtime to resolve the
// exception.  So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );

    // Compute the exception klass a little more cleverly.
    // The obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if( ex_node->is_Phi() ) {
      ex_klass_node = new (C, ex_node->req()) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
      for( uint i = 1; i < ex_node->req(); i++ ) {
        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

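  // 'remaining' tracks how many applicable handlers are left in the stream;
  // when only one non-rethrow entry remains, it must catch the exception
  // (see the unconditional merge below).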
  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (;!handlers.is_done(); handlers.next()) {
    // Do nothing if turned off
    if( !DeutschShiffmanExceptions ) break;
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                   // No more handling to be done here!
    }

    // %%% The following logic replicates make_from_klass_unique.
    // TO DO:  Replace by a subroutine call.  Then generalize
    // the type check, as noted in the next "%%%" comment.

    ciInstanceKlass* klass = handler->catch_klass();
    if (UseUniqueSubclasses) {
      // (We use make_from_klass because it respects UseUniqueSubclasses.)
      const TypeOopPtr* tp = TypeOopPtr::make_from_klass(klass);
      klass = tp->klass()->as_instance_klass();
    }

    // Get the handler's klass
    if (!klass->is_loaded())    // klass is not loaded?
      break;                    // Must call Rethrow!
    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out
    // See if the loaded exception klass has no subtypes
    if (klass->has_subklass())
      break;                    // Cannot easily do precise test ==> Rethrow

    // %%% Now that subclass checking is very fast, we need to rewrite
    // this section and remove the option "DeutschShiffmanExceptions".
    // The exception processing chain should be a normal typecase pattern,
    // with a bailout to the interpreter only in the case of unloaded
    // classes.  (The bailout should mark the method non-entrant.)
    // This rewrite should be placed in GraphKit::, not Parse::.

    // Add a dependence; if any subclass is added we need to recompile.
    // %%% should use stronger assert_unique_concrete_subtype instead
    if (!klass->is_final()) {
      C->dependencies()->assert_leaf_type(klass);
    }

    // Implement precise test
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* cmp = _gvn.transform( new (C, 3) CmpPNode(ex_klass_node, con) );
    Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
    { BuildCutout unless(this, bol, PROB_LIKELY(0.7f));
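      // Inside this cutout control flows on the branch where 'bol' is false,
      // i.e. the exception klass exactly matches the handler's klass; after
      // the scope exits, parsing resumes on the non-matching branch.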
      const TypeInstPtr* tinst = TypeInstPtr::make_exact(TypePtr::NotNull, klass);
      Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  if (remaining == 1) {
    // Further checks do not matter.
  }

  if (can_rerun_bytecode()) {
    // Do not push_ex_oop here!
    // Re-executing the bytecode will reproduce the throwing condition.
    bool must_throw = true;
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none,
                  (ciKlass*)NULL, (const char*)NULL, // default args
                  must_throw);
    return;
  }

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (depth() == 1) {
        const TypeInstPtr* addr_type = TypeInstPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


// Identify possible target method and inlining style
ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                   ciMethod *dest_method, const TypeOopPtr* receiver_type) {
  // Only used for virtual or interface calls.

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (dest_method->can_be_statically_bound()) {
    return dest_method;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        dest_method->holder() == env()->Object_klass()) {
      return dest_method;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is the same as, or a more specific type than, the original
      // actual_receiver, e.g. the static receiver from the bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass*   calling_klass = caller->holder();
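  // Ask CHA whether the currently loaded class hierarchy admits exactly one
  // concrete target for this call; if so we can devirtualize, recording a
  // dependency (below) in case later class loading adds another target.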
  ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                       C->log()->identify(klass),
                       C->log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}