
old/src/share/vm/code/compiledIC.cpp

 440         ((Klass*)info.cached_metadata())->print_value_string(),
 441         (safe) ? "" : "via stub");
 442     }
 443   }
 444   // We can't check this anymore. With lazy deopt we could have already
 445   // cleaned this IC entry before we even return. This is possible if
 446   // we ran out of space in the inline cache buffer trying to do the
 447   // set_next and we safepointed to free up space. This is a benign
 448   // race because the IC entry was complete when we safepointed so
 449   // cleaning it immediately is harmless.
 450   // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
 451 }
 452 
 453 
 454 // is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
 455 // static_bound: The call can be statically bound. If it isn't also optimized, the property
 456 // wasn't provable at time of compilation. An optimized call will have any necessary
 457 // null check, while a static_bound won't. A static_bound (but not optimized) must
 458 // therefore use the unverified entry point.
 459 void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
 460                                            KlassHandle receiver_klass,
 461                                            bool is_optimized,
 462                                            bool static_bound,
 463                                            bool caller_is_nmethod,
 464                                            CompiledICInfo& info,
 465                                            TRAPS) {
 466   CompiledMethod* method_code = method->code();
 467 
 468   address entry = NULL;
 469   if (method_code != NULL && method_code->is_in_use()) {
 470     assert(method_code->is_compiled(), "must be compiled");
 471     // Call to compiled code
 472     //
 473     // Note: the following problem exists with Compiler1:
 474     //   - at compile time we may or may not know if the destination is final
 475     //   - if we know that the destination is final (is_optimized), we will emit
 476     //     an optimized virtual call (no inline cache), and need a Method* to make
 477     //     a call to the interpreter
 478     //   - if we don't know if the destination is final, we emit a standard
 479     //     virtual call, and use CompiledICHolder to call interpreted code
 480     //     (no static call stub has been generated)
 481     //   - If we notice here that the call is static bound, we convert
 482     //     the call into what looks like an optimized virtual call,
 483     //     but we must use the unverified entry point (since there will be no
 484     //     null check on a call when the target isn't loaded).
 485     //     This causes problems when verifying the IC because
 486     //     it looks vanilla but is optimized. Code in is_call_to_interpreted
 487     //     is aware of this and weakens its asserts.
 488     if (is_optimized) {
 489       entry      = method_code->verified_entry_point();
 490     } else {
 491       entry      = method_code->entry_point();
 492     }
 493   }
 494   bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
 495   if (entry != NULL && !far_c2a) {
 496     // Call to near compiled code (nmethod or aot).
 497     info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
 498   } else {
 499     if (is_optimized) {
 500       if (far_c2a) {
 501         // Call to aot code from nmethod.
 502         info.set_aot_entry(entry, method());
 503       } else {
 504         // Use stub entry
 505         info.set_interpreter_entry(method()->get_c2i_entry(), method());
 506       }
 507     } else {
 508       // Use icholder entry
 509       assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
 510       CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
 511       info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
 512     }
 513   }
 514   assert(info.is_optimized() == is_optimized, "must agree");
 515 }
 516 
 517 
 518 bool CompiledIC::is_icholder_entry(address entry) {
 519   CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
 520   return (cb != NULL && cb->is_adapter_blob());
 521 }
 522 
 523 bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
 524   // This call site might have become stale, so inspect it carefully.
 525   address dest = cm->call_wrapper_at(call_site->addr())->destination();
 526   return is_icholder_entry(dest);
 527 }
 528 
 529   // Release the CompiledICHolder* associated with this call site if there is one.
 530 void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {

new/src/share/vm/code/compiledIC.cpp

 440         ((Klass*)info.cached_metadata())->print_value_string(),
 441         (safe) ? "" : "via stub");
 442     }
 443   }
 444   // We can't check this anymore. With lazy deopt we could have already
 445   // cleaned this IC entry before we even return. This is possible if
 446   // we ran out of space in the inline cache buffer trying to do the
 447   // set_next and we safepointed to free up space. This is a benign
 448   // race because the IC entry was complete when we safepointed so
 449   // cleaning it immediately is harmless.
 450   // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
 451 }
 452 
 453 
 454 // is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
 455 // static_bound: The call can be statically bound. If it isn't also optimized, the property
 456 // wasn't provable at time of compilation. An optimized call will have any necessary
 457 // null check, while a static_bound won't. A static_bound (but not optimized) must
 458 // therefore use the unverified entry point.
 459 void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
 460                                            Klass* receiver_klass,
 461                                            bool is_optimized,
 462                                            bool static_bound,
 463                                            bool caller_is_nmethod,
 464                                            CompiledICInfo& info,
 465                                            TRAPS) {
 466   CompiledMethod* method_code = method->code();
 467 
 468   address entry = NULL;
 469   if (method_code != NULL && method_code->is_in_use()) {
 470     assert(method_code->is_compiled(), "must be compiled");
 471     // Call to compiled code
 472     //
 473     // Note: the following problem exists with Compiler1:
 474     //   - at compile time we may or may not know if the destination is final
 475     //   - if we know that the destination is final (is_optimized), we will emit
 476     //     an optimized virtual call (no inline cache), and need a Method* to make
 477     //     a call to the interpreter
 478     //   - if we don't know if the destination is final, we emit a standard
 479     //     virtual call, and use CompiledICHolder to call interpreted code
 480     //     (no static call stub has been generated)
 481     //   - If we notice here that the call is static bound, we convert
 482     //     the call into what looks like an optimized virtual call,
 483     //     but we must use the unverified entry point (since there will be no
 484     //     null check on a call when the target isn't loaded).
 485     //     This causes problems when verifying the IC because
 486     //     it looks vanilla but is optimized. Code in is_call_to_interpreted
 487     //     is aware of this and weakens its asserts.
 488     if (is_optimized) {
 489       entry      = method_code->verified_entry_point();
 490     } else {
 491       entry      = method_code->entry_point();
 492     }
 493   }
 494   bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
 495   if (entry != NULL && !far_c2a) {
 496     // Call to near compiled code (nmethod or aot).
 497     info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass, is_optimized);
 498   } else {
 499     if (is_optimized) {
 500       if (far_c2a) {
 501         // Call to aot code from nmethod.
 502         info.set_aot_entry(entry, method());
 503       } else {
 504         // Use stub entry
 505         info.set_interpreter_entry(method()->get_c2i_entry(), method());
 506       }
 507     } else {
 508       // Use icholder entry
 509       assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
 510       CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
 511       info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
 512     }
 513   }
 514   assert(info.is_optimized() == is_optimized, "must agree");
 515 }
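
For orientation, a condensed sketch of how a resolver typically drives compute_monomorphic_entry. It follows the shape of SharedRuntime::resolve_sub_helper; locking and error handling are elided, callee_method, receiver_klass, the boolean flags, caller_nm and caller_frame are assumed to be in scope, and the exact call site varies by release:

  // Sketch only: resolving a virtual call site to a monomorphic target.
  CompiledICInfo virtual_call_info;
  CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
                                        is_optimized, static_bound,
                                        caller_is_nmethod, virtual_call_info,
                                        CHECK);
  {
    // Patching is done under CompiledIC_lock; only a clean IC is promoted.
    MutexLocker ml_patch(CompiledIC_lock);
    CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
    if (inline_cache->is_clean()) {
      inline_cache->set_to_monomorphic(virtual_call_info);
    }
  }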
 516 
 517 
 518 bool CompiledIC::is_icholder_entry(address entry) {
 519   CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
 520   return (cb != NULL && cb->is_adapter_blob());
 521 }
 522 
 523 bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
 524   // This call site might have become stale, so inspect it carefully.
 525   address dest = cm->call_wrapper_at(call_site->addr())->destination();
 526   return is_icholder_entry(dest);
 527 }
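
And a hedged sketch of the kind of scan that consumes is_icholder_call_site: walking a method's virtual-call relocations to see which sites still point at a CompiledICHolder. The driver loop here is illustrative, not a specific HotSpot function:

  // Sketch only: visit each virtual call site in a CompiledMethod and
  // test whether its destination still targets a CompiledICHolder.
  RelocIterator iter(cm);   // CompiledMethod* cm, assumed non-NULL
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      virtual_call_Relocation* r = iter.virtual_call_reloc();
      if (CompiledIC::is_icholder_call_site(r, cm)) {
        // The destination is in an adapter blob, i.e. the c2i unverified
        // entry installed by set_icholder_entry(); the holder is still live.
      }
    }
  }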
 528 
 529   // Release the CompiledICHolder* associated with this call site if there is one.
 530 void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {

