/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be held or we must be at a safepoint.
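//
// For orientation, an inline cache moves between these states (a rough
// sketch; the authoritative transitions are the set_to_* methods below):
//
//   clean        --set_to_monomorphic()-->  monomorphic (compiled or interpreted)
//   monomorphic  --set_to_megamorphic()-->  megamorphic (vtable/itable stub)
//   any state    --set_to_clean()------->   clean (resolve stub)
//
// A transition that cannot be patched atomically goes through an ICStub in
// the InlineCacheBuffer and completes at the next safepoint.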

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = get_data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert (cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_call->destination())) {
    // When patching for the ICStub case the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint.  Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
  }

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
    assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif
    _call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point.  Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

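  // A NULL cache value is stored as the non_oop_word() sentinel so that a
  // concurrent reader never observes a raw NULL (see the assert in
  // cached_value() above).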
  if (cache == NULL)  cache = (void*)Universe::non_oop_word();

  set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}



address CompiledIC::ic_destination() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _call->destination();
}

// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
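//
// Typical use from elsewhere in the runtime looks roughly like the
// following (an illustrative sketch, not code from this file; the
// CompiledIC_at factory functions are declared in compiledIC.hpp):
//
//   RelocIterator iter(cm, call_addr, call_addr + 1);
//   bool found = iter.next();
//   assert(found, "call site must have relocInfo");
//   CompiledIC* ic = CompiledIC_at(&iter);
//   if (ic->is_clean()) {
//     // ... resolve, then ic->set_to_monomorphic(info) ...
//   }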

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = _call->get_load_instruction(r);
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
  : _method(cm)
{
  _call = _method->call_wrapper_at((address) call);
  address ic_call = _call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(cm != NULL, "must pass compiled method");
  assert(cm->contains(ic_call), "must be in compiled method");

  // Search for the ic_call at the given address.
  RelocIterator iter(cm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _method(iter->code())
{
  _call = _method->call_wrapper_at(iter->addr());
  address ic_call = _call->instruction_address();

  CompiledMethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass compiled method");
  assert(nm->contains(ic_call), "must be in compiled method");

  initialize_from_iter(iter);
}

bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass());
    holder->claim();
    InlineCacheBuffer::create_transition_stub(this, holder, entry);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different from selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}
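
// Note on the two megamorphic flavors handled above (a summary of that code,
// not new behavior): an itable_call site transitions to an itable stub and
// caches a CompiledICHolder (resolved method's holder + resolved klass),
// while a vtable_call site transitions to a vtable stub and caches no value.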


// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_compiled());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized
  // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert(is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // The call is to the interpreter if the destination is either a stub in the
  // caller's own code blob (when the call is optimized) or an I2C adapter blob.
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    address dest = ic_destination();
#ifdef ASSERT
    {
      _call->verify_resolve_call(dest);
    }
#endif /* ASSERT */
    is_call_to_interpreted = _call->is_call_to_interpreted(dest);
  }
  return is_call_to_interpreted;
}

void CompiledIC::set_to_clean(bool in_use) {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry = _call->get_resolve_call_stub(is_optimized());

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}

bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == _call->get_resolve_call_stub(is_optimized());
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}

void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info.to_interpreter() || info.to_aot()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      methodHandle method(thread, (Method*)info.cached_metadata());
      _call->set_to_interpreted(method, info);

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
          p2i(instruction_address()),
          (info.to_aot() ? "aot" : "interpreter"),
          method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb != NULL && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
        p2i(instruction_address()),
        ((Klass*)info.cached_metadata())->print_value_string(),
        (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
// static_bound: The call can be statically bound. If it isn't also optimized, the property
// wasn't provable at the time of compilation. An optimized call will have any necessary
// null check, while a static_bound one won't. A static_bound (but not optimized) call must
// therefore use the unverified entry point.
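//
// Rough summary of the entry point chosen below when the target method has
// compiled code (this restates the logic in the function, nothing new):
//
//   is_optimized == true   ->  method_code->verified_entry_point()
//   is_optimized == false  ->  method_code->entry_point() (unverified)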
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           Klass* receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           bool caller_is_nmethod,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final (is_optimized), we will emit
    //     an optimized virtual call (no inline cache), and need a Method* to make
    //     a call to the interpreter
    //   - if we don't know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //   - In the case that we notice here that the call is static bound, we
    //     convert the call into what looks to be an optimized virtual call,
    //     but we must use the unverified entry point (since there will be no
    //     null check on a call when the target isn't loaded).
    //     This causes problems when verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.
    if (is_optimized) {
      entry      = method_code->verified_entry_point();
    } else {
      entry      = method_code->entry_point();
    }
  }
  bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
  if (entry != NULL && !far_c2a) {
    // Call to near compiled code (nmethod or aot).
    info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
  } else {
    if (is_optimized) {
      if (far_c2a) {
        // Call to aot code from nmethod.
        info.set_aot_entry(entry, method());
      } else {
        // Use stub entry
        info.set_interpreter_entry(method()->get_c2i_entry(), method());
      }
    } else {
      // Use icholder entry
      assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  if (cb != NULL && cb->is_adapter_blob()) {
    return true;
  }
  // itable stubs also use CompiledICHolder
  if (VtableStubs::is_entry_point(entry) && VtableStubs::stub_containing(entry)->is_itable_stub()) {
    return true;
  }
  return false;
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  // This call site might have become stale so inspect it carefully.
  address dest = cm->call_wrapper_at(call_site->addr())->destination();
  return is_icholder_entry(dest);
}

// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  assert(cm->is_nmethod(), "must be nmethod");
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(instruction_address());
  assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif

  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here:  It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}

bool CompiledStaticCall::is_clean() const {
  return destination() == resolve_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
  return cm->stub_contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_far() const {
  // It is a call to an aot method if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  CodeBlob* desc = CodeCache::find_blob(instruction_address());
  return desc->as_compiled_method()->stub_contains(destination());
}

void CompiledStaticCall::set_to_compiled(address entry) {
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
        name(),
        p2i(instruction_address()),
        p2i(entry));
  }
  // Call to compiled code
  assert(CodeCache::contains(entry), "wrong entry point");
  set_destination_mt_safe(entry);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
#if INCLUDE_AOT
  } else if (info._to_aot) {
    // Call to far code
    set_to_far(info.callee(), info.entry());
#endif
  } else {
    set_to_compiled(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we return the computed
// settings through the StaticCallInfo out-parameter.
void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) {
  CompiledMethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    if (caller_is_nmethod && m_code->is_far_code()) {
      // Call to far aot code from nmethod.
      info._to_aot = true;
    } else {
      info._to_aot = false;
    }
    info._to_interpreter = false;
    info._entry  = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code.  In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    info._entry      = m()->get_c2i_entry();
  }
}

address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction);
  while (iter.next()) {
    if (iter.addr() == instruction) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub(is_aot);
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub(is_aot);
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

address CompiledDirectStaticCall::find_stub(bool is_aot) {
  return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
}

address CompiledDirectStaticCall::resolve_call_stub() const {
  return SharedRuntime::get_resolve_static_call_stub();
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  _call->verify();
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledDirectStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_far()) {
    tty->print("far");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT