
src/share/vm/code/compiledIC.cpp

rev 12270 : 8160543: C1: Crash in java.lang.String.indexOf in some java.sql tests
Summary: C1 must use unverified entry point for unloaded methods.
Reviewed-by:
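
For orientation before the diff: the fix turns on the difference between an nmethod's two entry points. The verified entry point assumes the caller has already established that the receiver is non-null and of the expected class; the unverified entry point re-checks the receiver klass itself. Below is a minimal standalone C++ sketch of that contract (FakeNMethod, Klass, Receiver and both entry functions are invented for illustration; they are not HotSpot's types):

    // Illustrative model of verified vs. unverified entry points; not HotSpot code.
    #include <cstdio>

    struct Klass    { const char* name; };
    struct Receiver { const Klass* klass; };

    struct FakeNMethod {
      const Klass* expected_klass;  // klass the compiled code is specialized for

      // Verified entry: the caller guarantees the receiver check already happened.
      void verified_entry(const Receiver& r) const {
        std::printf("running compiled code for receiver of %s\n", r.klass->name);
      }

      // Unverified entry: re-check the receiver klass, bail out on a mismatch.
      void unverified_entry(const Receiver& r) const {
        if (r.klass != expected_klass) {
          std::printf("klass mismatch: would take the IC miss path\n");
          return;
        }
        verified_entry(r);
      }
    };

    int main() {
      Klass string_klass{"java/lang/String"};
      Klass object_klass{"java/lang/Object"};
      FakeNMethod nm{&string_klass};

      // An is_optimized call site has proven the receiver type, so the
      // verified entry is safe; a merely static_bound site has not, which
      // is why the patch routes it through the unverified entry.
      nm.verified_entry(Receiver{&string_klass});
      nm.unverified_entry(Receiver{&object_klass});
      return 0;
    }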


 443 
 444     if (TraceICs) {
 445       ResourceMark rm(thread);
 446       assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
 447       tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
 448         p2i(instruction_address()),
 449         ((Klass*)info.cached_metadata())->print_value_string(),
 450         (safe) ? "" : "via stub");
 451     }
 452   }
 453   // We can't check this anymore. With lazy deopt we could have already
 454   // cleaned this IC entry before we even return. This is possible if
 455   // we ran out of space in the inline cache buffer trying to do the
 456   // set_next and we safepointed to free up space. This is a benign
 457   // race because the IC entry was complete when we safepointed so
 458   // cleaning it immediately is harmless.
 459   // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
 460 }
 461 
 462 
 463 // is_optimized: Compiler has generated an optimized call
 464 //               (i.e., no inline cache)
 465 // static_bound: The call can be static bound (i.e., no need to use inline cache)
 466 void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
 467                                            KlassHandle receiver_klass,
 468                                            bool is_optimized,
 469                                            bool static_bound,
 470                                            CompiledICInfo& info,
 471                                            TRAPS) {
 472   CompiledMethod* method_code = method->code();
 473 
 474   address entry = NULL;
 475   if (method_code != NULL && method_code->is_in_use()) {
 476     assert(method_code->is_compiled(), "must be compiled");
 477     // Call to compiled code
 478     if (static_bound || is_optimized) {
 479       entry      = method_code->verified_entry_point();
 480     } else {
 481       entry      = method_code->entry_point();
 482     }
 483   }
 484   if (entry != NULL) {
 485     // Call to compiled code
 486     info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
 487   } else {
 488     // Note: the following problem exists with Compiler1:
 489     //   - at compile time we may or may not know if the destination is final
 490     //   - if we know that the destination is final, we will emit an optimized
 491     //     virtual call (no inline cache), and need a Method* to make a call
 492     //     to the interpreter
 493     //   - if we do not know if the destination is final, we emit a standard
 494     //     virtual call, and use CompiledICHolder to call interpreted code
 495     //     (no static call stub has been generated)
 496     //     However in that case we will now notice it is static_bound
 497     //     and convert the call into what looks to be an optimized
 498     //     virtual call. This causes problems in verifying the IC because
 499     //     it looks vanilla but is optimized. Code in is_call_to_interpreted
 500     //     is aware of this and weakens its asserts.
 501 
 502     // static_bound should imply is_optimized -- otherwise we have a
 503     // performance bug (a statically-bindable method is called via a
 504     // dynamically-dispatched call). Note: the reverse implication isn't
 505     // necessarily true -- the call may have been optimized based on compiler
 506     // analysis (static_bound is only based on "final" etc.)
 507 #ifdef COMPILER2
 508 #ifdef TIERED
 509 #if defined(ASSERT)
 510     // Can't check the assert because we don't have the CompiledIC with which to
 511     // find the address of the call instruction.
 512     //
 513     // CodeBlob* cb = find_blob_unsafe(instruction_address());
 514     // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
 515 #endif // ASSERT
 516 #else
 517     assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
 518 #endif // TIERED
 519 #endif // COMPILER2
 520     if (is_optimized) {
 521       // Use stub entry
 522       info.set_interpreter_entry(method()->get_c2i_entry(), method());
 523     } else {
 524       // Use icholder entry
 525       assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
 526       CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
 527       info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
 528     }
 529   }
 530   assert(info.is_optimized() == is_optimized, "must agree");
 531 }
 532 
 533 
 534 bool CompiledIC::is_icholder_entry(address entry) {
 535   CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
 536   return (cb != NULL && cb->is_adapter_blob());
 537 }
 538 
 539 // Release the CompiledICHolder* associated with this call site if there is one.
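
The behavioral core of the patch is the predicate that picks between those entry points. In the pre-patch code above, a call that was static_bound but not optimized (and so carried no compiled-in receiver check) could still reach the verified entry point. A standalone before/after reduction, with invented names, might look like this:

    // Illustrative reduction of the entry-point selection; not HotSpot code.
    #include <cstdio>

    enum class Entry { Verified, Unverified };

    // Pre-patch selection: static_bound alone was enough for the verified entry.
    Entry select_entry_old(bool is_optimized, bool static_bound) {
      return (static_bound || is_optimized) ? Entry::Verified : Entry::Unverified;
    }

    // Patched selection: only a genuinely optimized call, which carries its
    // own null/receiver check, may use the verified entry point.
    Entry select_entry_new(bool is_optimized, bool /*static_bound*/) {
      return is_optimized ? Entry::Verified : Entry::Unverified;
    }

    int main() {
      // The case the bug summary points at: the call turned out to be
      // static bound, but C1 did not compile it as an optimized call,
      // so static_bound is true while is_optimized is false.
      bool is_optimized = false, static_bound = true;
      std::printf("old: %s\n",
                  select_entry_old(is_optimized, static_bound) == Entry::Verified
                      ? "verified (unsafe here)" : "unverified");
      std::printf("new: %s\n",
                  select_entry_new(is_optimized, static_bound) == Entry::Verified
                      ? "verified (unsafe here)" : "unverified");
      return 0;
    }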




 443 
 444     if (TraceICs) {
 445       ResourceMark rm(thread);
 446       assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
 447       tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
 448         p2i(instruction_address()),
 449         ((Klass*)info.cached_metadata())->print_value_string(),
 450         (safe) ? "" : "via stub");
 451     }
 452   }
 453   // We can't check this anymore. With lazy deopt we could have already
 454   // cleaned this IC entry before we even return. This is possible if
 455   // we ran out of space in the inline cache buffer trying to do the
 456   // set_next and we safepointed to free up space. This is a benign
 457   // race because the IC entry was complete when we safepointed so
 458   // cleaning it immediately is harmless.
 459   // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
 460 }
 461 
 462 
 463 // is_optimized: Compiler has generated an optimized call (i.e., fixed, no inline cache)
 464 // static_bound: The call can be static bound. If it isn't also optimized, the property
 465 // wasn't provable at the time of compilation. An optimized call will have any necessary
 466 // null check, while a static_bound call won't. A static_bound (but not optimized) call
 467 // must therefore use the unverified entry point.
 468 void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
 469                                            KlassHandle receiver_klass,
 470                                            bool is_optimized,
 471                                            bool static_bound,
 472                                            CompiledICInfo& info,
 473                                            TRAPS) {
 474   CompiledMethod* method_code = method->code();
 475 
 476   address entry = NULL;
 477   if (method_code != NULL && method_code->is_in_use()) {
 478     assert(method_code->is_compiled(), "must be compiled");
 479     // Call to compiled code
 480     //
 481     // Note: the following problem exists with Compiler1:
 482     //   - at compile time we may or may not know if the destination is final
 483     //   - if we know that the destination is final (is_optimized), we will emit
 484     //     an optimized virtual call (no inline cache), and need a Method* to make
 485     //     a call to the interpreter
 486     //   - if we don't know if the destination is final, we emit a standard
 487     //     virtual call, and use CompiledICHolder to call interpreted code
 488     //     (no static call stub has been generated)
 489     //   - if we notice here that the call is static bound, we convert it
 490     //     into what looks like an optimized virtual call, but we must use
 491     //     the unverified entry point (since there will be no null check on
 492     //     the call when the target wasn't loaded at compile time).
 493     //     This causes problems when verifying the IC because
 494     //     it looks vanilla but is optimized. Code in is_call_to_interpreted
 495     //     is aware of this and weakens its asserts.
 496     if (is_optimized) {
 497       entry      = method_code->verified_entry_point();
 498     } else {
 499       entry      = method_code->entry_point();
 500     }
 501   }
 502   if (entry != NULL) {
 503     // Call to compiled code
 504     info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
 505   } else {
 506     if (is_optimized) {
 507       // Use stub entry
 508       info.set_interpreter_entry(method()->get_c2i_entry(), method());
 509     } else {
 510       // Use icholder entry
 511       assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
 512       CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
 513       info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
 514     }
 515   }
 516   assert(info.is_optimized() == is_optimized, "must agree");
 517 }
 518 
 519 
 520 bool CompiledIC::is_icholder_entry(address entry) {
 521   CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
 522   return (cb != NULL && cb->is_adapter_blob());
 523 }
 524 
 525 // Release the CompiledICHolder* associated with this call site if there is one.
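
For the branch where no compiled entry exists and the call is not optimized, the code above points the site at the c2i unverified adapter and attaches a CompiledICHolder pairing the Method* with the receiver klass; is_icholder_entry later recognizes such a site by checking that the destination lies in an adapter blob. A stand-in sketch of the holder idea (all types invented for illustration, not HotSpot's):

    // Stand-in sketch of the CompiledICHolder idea; not HotSpot code.
    #include <cstdio>

    struct Method { const char* name; };
    struct Klass  { const char* name; };

    // Pairs the interpreted target with the receiver klass the inline cache
    // was monomorphic on, so the adapter can re-validate before dispatching.
    struct ICHolder {
      const Method* method;
      const Klass*  receiver_klass;
    };

    // Models the c2i unverified adapter: verify the receiver klass recorded
    // in the holder, then hand the call off to the interpreter.
    void c2i_unverified_adapter(const ICHolder& holder, const Klass* actual) {
      if (actual != holder.receiver_klass) {
        std::printf("receiver klass changed: IC miss, re-resolve the call site\n");
        return;
      }
      std::printf("interpreting %s\n", holder.method->name);
    }

    int main() {
      Method indexOf{"java.lang.String.indexOf"};
      Klass  string_klass{"java/lang/String"};
      Klass  object_klass{"java/lang/Object"};
      ICHolder holder{&indexOf, &string_klass};

      c2i_unverified_adapter(holder, &string_klass);  // hit: interpret the method
      c2i_unverified_adapter(holder, &object_klass);  // miss: re-resolve
      return 0;
    }

Keeping the pair in a separate holder object lets the call site itself remain a plain jump, while the adapter redoes the check on every entry; that is also why such sites can be identified purely by where their destination lands.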

