  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized: Compiler has generated an optimized call (i.e., no inline cache)
// static_bound: The call can be static bound (i.e., no need to use inline cache)
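//
// Fills 'info' with the entry point to call and the value (if any) to
// install in the inline cache at the call site.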
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
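    // An optimized or statically bound call skips the inline cache check,
    // so it can enter through the verified entry point; a plain virtual
    // call must go through the unverified entry point, whose prologue
    // re-checks the receiver klass against the cached klass.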
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
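    // No cached klass is needed when the call is optimized or statically
    // bound (the IC behaves like a direct call); otherwise the receiver
    // klass becomes the value the inline cache checks against.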
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a Method* to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However in that case we will now notice it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because