/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be held or we must be at a safepoint.
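//
// An inline cache is in one of three states: clean (its destination is the
// resolve stub), monomorphic (its destination is a single compiled or
// interpreted callee), or megamorphic (it dispatches through a vtable or
// itable stub). Transitions that cannot be patched atomically in place go
// through an ICStub allocated from the InlineCacheBuffer and are completed
// at a safepoint.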

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = get_data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


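// Patches the call destination and, for non-optimized ICs, the cached value
// (a Metadata* or CompiledICHolder*). A NULL cache is stored as the non-oop
// sentinel word so that the data slot never holds a raw NULL (see the assert
// in cached_value()).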
void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert (cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_call->destination())) {
    // When patching for the ICStub case the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint.  Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
  }

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
    assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif
    _call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point.  Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL)  cache = (void*)Universe::non_oop_word();

  set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}


address CompiledIC::ic_destination() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


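// An IC is "in transition" while its destination points into the
// InlineCacheBuffer; until the pending ICStub is installed, the eventual
// destination and cached value have to be read from that stub.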
bool CompiledIC::is_in_transition_state() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _call->destination();
}

// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = _call->get_load_instruction(r);
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
  : _method(cm)
{
  _call = _method->call_wrapper_at((address) call);
  address ic_call = _call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(cm != NULL, "must pass compiled method");
  assert(cm->contains(ic_call), "must be in compiled method");

  // Search for the ic_call at the given address.
  RelocIterator iter(cm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _method(iter->code())
{
  _call = _method->call_wrapper_at(iter->addr());
  address ic_call = _call->instruction_address();

  CompiledMethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass compiled method");
  assert(nm->contains(ic_call), "must be in compiled method");

  initialize_from_iter(iter);
}

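// Transitions this IC to the megamorphic state by dispatching through a
// vtable or itable stub chosen from the CallInfo. Returns false if the
// required stub could not be found or generated.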
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass(), false);
    holder->claim();
    InlineCacheBuffer::create_transition_stub(this, holder, entry);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different than selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}


// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::entry_point(ic_destination()) != NULL;
}

bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_compiled());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized
  // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert( is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    address dest = ic_destination();
#ifdef ASSERT
    {
      _call->verify_resolve_call(dest);
    }
#endif /* ASSERT */
    is_call_to_interpreted = _call->is_call_to_interpreted(dest);
  }
  return is_call_to_interpreted;
}

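// Resets this IC to the clean state: the resolve stub becomes the destination
// and any cached value is dropped. The change is made in place when patching
// is safe, and via a transition stub otherwise.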
void CompiledIC::set_to_clean(bool in_use) {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry = _call->get_resolve_call_stub(is_optimized());

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}

bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == _call->get_resolve_call_stub(is_optimized());
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}

void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info.to_interpreter() || info.to_aot()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      methodHandle method (thread, (Method*)info.cached_metadata());
      _call->set_to_interpreted(method, info);

      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
           p2i(instruction_address()),
           (info.to_aot() ? "aot" : "interpreter"),
           method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb != NULL && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
        p2i(instruction_address()),
        ((Klass*)info.cached_metadata())->print_value_string(),
        (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
// static_bound: The call can be static bound. If it isn't also optimized, the property
// wasn't provable at time of compilation. An optimized call will have any necessary
// null check, while a static_bound won't. A static_bound (but not optimized) must
// therefore use the unverified entry point.
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           Klass* receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           bool caller_is_nmethod,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final (is_optimized), we will emit
    //     an optimized virtual call (no inline cache), and need a Method* to make
    //     a call to the interpreter
    //   - if we don't know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //   - In the case that we here notice the call is static bound we
    //     convert the call into what looks to be an optimized virtual call,
    //     but we must use the unverified entry point (since there will be no
    //     null check on a call when the target isn't loaded).
    //     This causes problems when verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.
    if (is_optimized) {
      entry      = method_code->verified_entry_point();
    } else {
      entry      = method_code->entry_point();
    }
  }
  bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
  if (entry != NULL && !far_c2a) {
    // Call to near compiled code (nmethod or aot).
    info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
  } else {
    if (is_optimized) {
      if (far_c2a) {
        // Call to aot code from nmethod.
        info.set_aot_entry(entry, method());
      } else {
        // Use stub entry
        info.set_interpreter_entry(method()->get_c2i_entry(), method());
      }
    } else {
      // Use icholder entry
      assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


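// Returns true for entries that expect a CompiledICHolder in the IC data
// slot: calls into the (unverified) c2i adapter and calls through itable
// stubs.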
bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  if (cb != NULL && cb->is_adapter_blob()) {
    return true;
  }
  // itable stubs also use CompiledICHolder
  if (cb != NULL && cb->is_vtable_blob()) {
    VtableStub* s = VtableStubs::entry_point(entry);
    return (s != NULL) && s->is_itable_stub();
  }

  return false;
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  // This call site might have become stale so inspect it carefully.
  address dest = cm->call_wrapper_at(call_site->addr())->destination();
  return is_icholder_entry(dest);
}

// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  assert(cm->is_nmethod(), "must be nmethod");
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(instruction_address());
  assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif

  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here:  It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}

bool CompiledStaticCall::is_clean() const {
  return destination() == resolve_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
  return cm->stub_contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_far() const {
  // It is a call to an aot method if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  CodeBlob* desc = CodeCache::find_blob(instruction_address());
  return desc->as_compiled_method()->stub_contains(destination());
}

void CompiledStaticCall::set_to_compiled(address entry) {
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
        name(),
        p2i(instruction_address()),
        p2i(entry));
  }
  // Call to compiled code
  assert(CodeCache::contains(entry), "wrong entry point");
  set_destination_mt_safe(entry);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
#if INCLUDE_AOT
  } else if (info._to_aot) {
    // Call to far code
    set_to_far(info.callee(), info.entry());
#endif
  } else {
    set_to_compiled(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) {
  CompiledMethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    if (caller_is_nmethod && m_code->is_far_code()) {
      // Call to far aot code from nmethod.
      info._to_aot = true;
    } else {
      info._to_aot = false;
    }
    info._to_interpreter = false;
    info._entry  = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code.  In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    info._entry      = m()->get_c2i_entry();
  }
}

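// Walks the relocation information at the given call instruction to locate
// the associated static call stub (the stub used for calls to interpreted or
// far code).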
address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction);
  while (iter.next()) {
    if (iter.addr() == instruction) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub(is_aot);
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub(is_aot);
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

address CompiledDirectStaticCall::find_stub(bool is_aot) {
  return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
}

address CompiledDirectStaticCall::resolve_call_stub() const {
  return SharedRuntime::get_resolve_static_call_stub();
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  _call->verify();
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
          || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledDirectStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_far()) {
    tty->print("far");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT