/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeBehaviours.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Whenever a compiled IC is changed or its type is accessed, either the
// CompiledIC_lock must be held or we must be at a safepoint.
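//
// A minimal usage sketch (illustrative, not code from this file): callers
// typically guard inspection or patching of an IC with a scoped locker:
//
//   CompiledICLocker ml(cm);                // 'cm' is a CompiledMethod*
//   CompiledIC* ic = CompiledIC_at(&iter);  // accessor varies by call site
//   if (ic->is_clean()) { /* safe to query or patch the IC here */ }
//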

CompiledICLocker::CompiledICLocker(CompiledMethod* method)
  : _method(method),
    _behaviour(CompiledICProtectionBehaviour::current()),
    _locked(_behaviour->lock(_method)),
    _nsv(true, !SafepointSynchronize::is_at_safepoint()) {
}

CompiledICLocker::~CompiledICLocker() {
  if (_locked) {
    _behaviour->unlock(_method);
  }
}

bool CompiledICLocker::is_safe(CompiledMethod* method) {
  return CompiledICProtectionBehaviour::current()->is_safe(method);
}

bool CompiledICLocker::is_safe(address code) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(code);
  assert(cb != NULL && cb->is_compiled(), "must be compiled");
  CompiledMethod* cm = cb->as_compiled_method();
  return CompiledICProtectionBehaviour::current()->is_safe(cm);
}

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "an optimized virtual call does not have cached metadata");

  if (!is_in_transition_state()) {
    void* data = get_data();
    // The data slot is never a raw NULL: a clean IC holds non_oop_word()
    // instead, which keeps patching races unambiguous.
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized() || cache == NULL, "an optimized virtual call does not have cached metadata");
  assert(cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test, since that forwards
  // through the ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_call->destination())) {
    // When patching for the ICStub case, the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint.  Make sure that the CompiledICHolder* is
    // marked for release at this point, since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
  }

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
    assert(cb != NULL && cb->is_compiled(), "must be compiled");
    _call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point.  Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL) cache = (void*)Universe::non_oop_word();

  set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}


address CompiledIC::ic_destination() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  if (!is_in_transition_state()) {
    return _call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  return InlineCacheBuffer::contains(_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns the native address of the 'call' instruction in an inline cache.
// Used by the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _call->destination();
}

// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}
//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
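
// For orientation, a sketch (not exhaustive) of the state transitions the
// setters below implement:
//
//   clean --set_to_monomorphic()--> monomorphic (compiled or interpreted)
//   monomorphic --set_to_megamorphic()--> megamorphic (vtable/itable stub)
//   any state --set_to_clean()--> clean
//
// MT-unsafe transitions are routed through an ICStub in the InlineCacheBuffer
// (the transition state) and take effect at the next safepoint.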

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = _call->get_load_instruction(r);
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be an optimized virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
  : _method(cm)
{
  _call = _method->call_wrapper_at((address) call);
  address ic_call = _call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(cm != NULL, "must pass compiled method");
  assert(cm->contains(ic_call), "must be in compiled method");

  // Search for the ic_call at the given address.
  RelocIterator iter(cm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}
 225 
 226 CompiledIC::CompiledIC(RelocIterator* iter)
 227   : _method(iter->code())
 228 {
 229   _call = _method->call_wrapper_at(iter->addr());
 230   address ic_call = _call->instruction_address();
 231 
 232   CompiledMethod* nm = iter->code();
 233   assert(ic_call != NULL, "ic_call address must be set");
 234   assert(nm != NULL, "must pass compiled method");
 235   assert(nm->contains(ic_call), "must be in compiled method");
 236 
 237   initialize_from_iter(iter);
 238 }

// This function may fail for two reasons: either we ran out of vtable stubs,
// or we ran out of IC stubs while attempting the move to the transitional
// state. The needs_ic_stub_refill flag is set if the failure was due to
// running out of IC stubs, in which case the caller should refill the IC
// stubs and retry.
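//
// A hedged sketch of the expected caller pattern (not code from this file):
//
//   bool needs_refill = false;
//   if (!ic->set_to_megamorphic(&call_info, bytecode, needs_refill,
//                               caller_is_c1, CHECK_false)) {
//     if (needs_refill) {
//       // refill the InlineCacheBuffer, then retry the transition
//     } else {
//       // out of vtable stubs: leave the call site as it is
//     }
//   }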
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode,
                                    bool& needs_ic_stub_refill, bool caller_is_c1, TRAPS) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index, caller_is_c1);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass(), false);
    holder->claim();
    if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
      delete holder;
      needs_ic_stub_refill = true;
      return false;
    }
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different from selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index, caller_is_c1);
    if (entry == NULL) {
      return false;
    }
    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
      needs_ic_stub_refill = true;
      return false;
    }
  }

  if (TraceICs) {
    ResourceMark rm;
    assert(!call_info->selected_method().is_null(), "Unexpected null selected method");
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}


// Returns true if the destination is a megamorphic (vtable/itable) stub.
bool CompiledIC::is_megamorphic() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::entry_point(ic_destination()) != NULL;
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");

  // Use the unsafe lookup, since an inline cache might point to a zombie
  // method. However, the zombie method is guaranteed to still exist, since
  // we only remove methods after all inline caches have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_compiled());
  // Check that the cached_value is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub)
  // can be used for calling directly to the vep without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining, so call sites which could be
  // optimized virtuals (because there are no currently loaded subclasses of a type) are left as
  // virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert(is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  // This is a call to the interpreter if the destination either goes through
  // a stub in the caller (when the call is optimized) or through an I2C blob.
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // Must use the unsafe lookup, because the destination can be a zombie
    // (and we're cleaning), and the print_compiled_ic code wants to know
    // whether the site (in the non-zombie) calls to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    address dest = ic_destination();
#ifdef ASSERT
    {
      _call->verify_resolve_call(dest);
    }
#endif /* ASSERT */
    is_call_to_interpreted = _call->is_call_to_interpreted(dest);
  }
  return is_call_to_interpreted;
}

bool CompiledIC::set_to_clean(bool in_use) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry = _call->get_resolve_call_stub(is_optimized());

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
      return false;
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
  return true;
}

bool CompiledIC::is_clean() const {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == _call->get_resolve_call_stub(is_optimized());
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}

bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition, ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target, and these
  // transitions are mt_safe.

  Thread* thread = Thread::current();
  if (info.to_interpreter() || info.to_aot()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      // The call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or because the static target is final).
      // At code generation time, this call was emitted as a static call.
      // Call via stub.
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      methodHandle method(thread, (Method*)info.cached_metadata());
      _call->set_to_interpreted(method, info);

      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
           p2i(instruction_address()),
           (info.to_aot() ? "aot" : "interpreter"),
           method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      CompiledICHolder* holder = info.claim_cached_icholder();
      if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
        delete holder;
        return false;
      }
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb != NULL && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
        return false;
      }
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass = %s) %s",
        p2i(instruction_address()),
        (info.cached_metadata() != NULL) ? ((Klass*)info.cached_metadata())->print_value_string() : "NULL",
        (safe) ? "" : " via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
  return true;
}


// is_optimized: The compiler has generated an optimized call (i.e. fixed, no inline cache).
// static_bound: The call can be statically bound. If it isn't also optimized, the property
// wasn't provable at compile time. An optimized call will have any necessary
// null check, while a static_bound call won't. A static_bound (but not optimized) call must
// therefore use the unverified entry point.
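//
// A summary of the entry selection below (derived from the code, not added
// behaviour), assuming the callee has usable compiled code:
//
//   is_optimized  -> method_code->verified_entry_point()  (null check already done)
//   !is_optimized -> method_code->entry_point()           (unverified, checks receiver)
//
// and when no compiled code is in use:
//
//   is_optimized  -> a c2i stub, with the Method* recorded in 'info'
//   !is_optimized -> the c2i unverified entry, via a CompiledICHolder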
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           Klass* receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           bool caller_is_nmethod,
                                           bool caller_is_c1,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final (is_optimized), we will emit
    //     an optimized virtual call (no inline cache), and need a Method* to make
    //     a call to the interpreter
    //   - if we don't know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //   - if we notice here that the call is statically bound, we convert
    //     the call into what looks like an optimized virtual call,
    //     but we must use the unverified entry point (since there will be no
    //     null check on a call when the target isn't loaded).
    //     This causes problems when verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.
    if (is_optimized) {
      entry      = method_code->verified_entry_point();
    } else {
      entry      = method_code->entry_point();
    }
  }
  bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
  if (entry != NULL && !far_c2a) {
    // Call to near compiled code (nmethod or aot).
    info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
  } else {
    if (is_optimized) {
      if (far_c2a) {
        // Call to aot code from nmethod.
        info.set_aot_entry(entry, method());
      } else {
        // Use stub entry
        address entry = caller_is_c1 ? method()->get_c2i_value_entry() : method()->get_c2i_entry();
        info.set_interpreter_entry(entry, method());
      }
    } else {
      // Use icholder entry
      assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  if (cb != NULL && cb->is_adapter_blob()) {
    return true;
  }
  // itable stubs also use CompiledICHolder
  if (cb != NULL && cb->is_vtable_blob()) {
    VtableStub* s = VtableStubs::entry_point(entry);
    return (s != NULL) && s->is_itable_stub();
  }

  return false;
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  // This call site might have become stale so inspect it carefully.
  address dest = cm->call_wrapper_at(call_site->addr())->destination();
  return is_icholder_entry(dest);
}

// ----------------------------------------------------------------------------

bool CompiledStaticCall::set_to_clean(bool in_use) {
  // in_use is unused but needed to match the template function in CompiledMethod
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  // Reset call site
  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here:  It is too expensive to call find_stub.
  // Instead, rely on the caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
  return true;
}

bool CompiledStaticCall::is_clean() const {
  return destination() == resolve_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
  return cm->stub_contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_far() const {
  // It is a call to an AOT method if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CodeBlob* desc = CodeCache::find_blob(instruction_address());
  return desc->as_compiled_method()->stub_contains(destination());
}

void CompiledStaticCall::set_to_compiled(address entry) {
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
        name(),
        p2i(instruction_address()),
        p2i(entry));
  }
  // Call to compiled code
  assert(CodeCache::contains(entry), "wrong entry point");
  set_destination_mt_safe(entry);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
#if INCLUDE_AOT
  } else if (info._to_aot) {
    // Call to far code
    set_to_far(info.callee(), info.entry());
#endif
  } else {
    set_to_compiled(info.entry());
  }
}
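
// An illustrative pairing (a sketch, not code from this file): resolution code
// computes the settings via compute_entry() below and then applies them to a
// clean call site with set() above, which asserts is_clean():
//
//   StaticCallInfo info;
//   CompiledStaticCall::compute_entry(callee, caller_nm, info);
//   csc->set(info);   // 'csc': a hypothetical CompiledStaticCall*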

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we return the computed settings
// (entry point and callee) through 'info'.
void CompiledStaticCall::compute_entry(const methodHandle& m, CompiledMethod* caller_nm, StaticCallInfo& info) {
  bool caller_is_nmethod = caller_nm->is_nmethod();
  CompiledMethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    if (caller_is_nmethod && m_code->is_far_code()) {
      // Call to far aot code from nmethod.
      info._to_aot = true;
    } else {
      info._to_aot = false;
    }
    info._to_interpreter = false;
    if (caller_nm->is_c1()) {
      info._entry = m_code->verified_value_entry_point();
    } else {
      info._entry = m_code->verified_entry_point();
    }
  } else {
    // Callee is interpreted code.  In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;

    if (caller_nm->is_c1()) {
      // C1 -> interp: values passed as oops
      info._entry = m()->get_c2i_value_entry();
    } else {
      // C2 -> interp: values passed as fields
      info._entry = m()->get_c2i_entry();
    }
  }
}

address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction);
  while (iter.next()) {
    if (iter.addr() == instruction) {
      switch (iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub(is_aot);
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub(is_aot);
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

address CompiledDirectStaticCall::find_stub(bool is_aot) {
  return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
}

address CompiledDirectStaticCall::resolve_call_stub() const {
  return SharedRuntime::get_resolve_static_call_stub();
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  _call->verify();
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
          || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledDirectStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_far()) {
    tty->print("far");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT