275 // Cannot rely on cached_value. It is either an interface or a method.
276 return VtableStubs::entry_point(ic_destination()) != NULL;
277 }
278
// Returns true when the destination of this inline cache is a compiled
// CodeBlob, i.e. the call site is currently monomorphic to compiled code.
// Caller must hold CompiledIC_lock, be at a safepoint, or the owning
// method must be safe for IC patching (enforced by the assert below).
279 bool CompiledIC::is_call_to_compiled() const {
280 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint() || _method->is_safe_for_ic_patching(), "");
281
282 // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
283 // method is guaranteed to still exist, since we only remove methods after all inline caches
284 // has been cleaned up
285 CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
286 bool is_monomorphic = (cb != NULL && cb->is_compiled());
287 // Check that the cached_value is a klass for non-optimized monomorphic calls
288 // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
289 // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
290 // For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized
291 // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
292 #ifdef ASSERT
293 CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
294 bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
295 assert( is_c1_or_jvmci_method ||
296 !is_monomorphic ||
297 is_optimized() ||
298 !caller->is_alive() ||
299 (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
300 #endif // ASSERT
301 return is_monomorphic;
302 }
303
304
305 bool CompiledIC::is_call_to_interpreted() const {
306 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint() || _method->is_safe_for_ic_patching(), "");
307 // Call to interpreter if destination is either calling to a stub (if it
308 // is optimized), or calling to an I2C blob
309 bool is_call_to_interpreted = false;
310 if (!is_optimized()) {
311 // must use unsafe because the destination can be a zombie (and we're cleaning)
312 // and the print_compiled_ic code wants to know if site (in the non-zombie)
313 // is to the interpreter.
314 CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
315 is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
316 assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
317 } else {
|
275 // Cannot rely on cached_value. It is either an interface or a method.
276 return VtableStubs::entry_point(ic_destination()) != NULL;
277 }
278
// Returns true when the destination of this inline cache is a compiled
// CodeBlob, i.e. the call site is currently monomorphic to compiled code.
// Caller must hold CompiledIC_lock, be at a safepoint, or the owning
// method must be safe for IC patching (enforced by the assert below).
279 bool CompiledIC::is_call_to_compiled() const {
280 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint() || _method->is_safe_for_ic_patching(), "");
281
282 // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
283 // method is guaranteed to still exist, since we only remove methods after all inline caches
284 // has been cleaned up
285 CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
286 bool is_monomorphic = (cb != NULL && cb->is_compiled());
287 // Check that the cached_value is a klass for non-optimized monomorphic calls
288 // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
289 // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
290 // For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized
291 // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
292 #ifdef ASSERT
293 CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
294 bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
// The cached_metadata() klass check is also relaxed for a caller nmethod that
// is concurrently unloading, since its IC may hold a stale cached value.
295 assert(is_c1_or_jvmci_method ||
296 !is_monomorphic ||
297 is_optimized() ||
298 !caller->is_alive() ||
299 (caller->is_nmethod() && ((nmethod*)caller)->is_unloading()) ||
300 (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
301 #endif // ASSERT
302 return is_monomorphic;
303 }
304
305
306 bool CompiledIC::is_call_to_interpreted() const {
307 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint() || _method->is_safe_for_ic_patching(), "");
308 // Call to interpreter if destination is either calling to a stub (if it
309 // is optimized), or calling to an I2C blob
310 bool is_call_to_interpreted = false;
311 if (!is_optimized()) {
312 // must use unsafe because the destination can be a zombie (and we're cleaning)
313 // and the print_compiled_ic code wants to know if site (in the non-zombie)
314 // is to the interpreter.
315 CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
316 is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
317 assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
318 } else {
|
349 set_ic_destination_and_value(entry, (void*)NULL);
350 }
351 } else {
352 // Unsafe transition - create stub.
353 InlineCacheBuffer::create_transition_stub(this, NULL, entry);
354 }
355 // We can't check this anymore. With lazy deopt we could have already
356 // cleaned this IC entry before we even return. This is possible if
357 // we ran out of space in the inline cache buffer trying to do the
358 // set_next and we safepointed to free up space. This is a benign
359 // race because the IC entry was complete when we safepointed so
360 // cleaning it immediately is harmless.
361 // assert(is_clean(), "sanity check");
362 }
363
364 bool CompiledIC::is_clean() const {
365 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint() || _method->is_safe_for_ic_patching(), "");
366 bool is_clean = false;
367 address dest = ic_destination();
368 is_clean = dest == _call->get_resolve_call_stub(is_optimized());
369 assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
370 return is_clean;
371 }
372
373 void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
374 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
375 // Updating a cache to the wrong entry can cause bugs that are very hard
376 // to track down - if cache entry gets invalid - we just clean it. In
377 // this way it is always the same code path that is responsible for
378 // updating and resolving an inline cache
379 //
380 // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
381 // callsites. In addition ic_miss code will update a site to monomorphic if it determines
382 // that an monomorphic call to the interpreter can now be monomorphic to compiled code.
383 //
384 // In both of these cases the only thing being modifed is the jump/call target and these
385 // transitions are mt_safe
386
387 Thread *thread = Thread::current();
388 if (info.to_interpreter() || info.to_aot()) {
|
350 set_ic_destination_and_value(entry, (void*)NULL);
351 }
352 } else {
353 // Unsafe transition - create stub.
354 InlineCacheBuffer::create_transition_stub(this, NULL, entry);
355 }
356 // We can't check this anymore. With lazy deopt we could have already
357 // cleaned this IC entry before we even return. This is possible if
358 // we ran out of space in the inline cache buffer trying to do the
359 // set_next and we safepointed to free up space. This is a benign
360 // race because the IC entry was complete when we safepointed so
361 // cleaning it immediately is harmless.
362 // assert(is_clean(), "sanity check");
363 }
364
365 bool CompiledIC::is_clean() const {
366 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint() || _method->is_safe_for_ic_patching(), "");
367 bool is_clean = false;
368 address dest = ic_destination();
369 is_clean = dest == _call->get_resolve_call_stub(is_optimized());
370 assert(UseZGC || !is_clean || is_optimized() || cached_value() == NULL, "sanity check");
371 return is_clean;
372 }
373
374 void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
375 assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
376 // Updating a cache to the wrong entry can cause bugs that are very hard
377 // to track down - if cache entry gets invalid - we just clean it. In
378 // this way it is always the same code path that is responsible for
379 // updating and resolving an inline cache
380 //
381 // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
382 // callsites. In addition ic_miss code will update a site to monomorphic if it determines
383 // that an monomorphic call to the interpreter can now be monomorphic to compiled code.
384 //
385 // In both of these cases the only thing being modifed is the jump/call target and these
386 // transitions are mt_safe
387
388 Thread *thread = Thread::current();
389 if (info.to_interpreter() || info.to_aot()) {
|
674 }
675 }
676 }
677 return NULL;
678 }
679
680 address CompiledDirectStaticCall::find_stub(bool is_aot) {
681 return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
682 }
683
684 address CompiledDirectStaticCall::resolve_call_stub() const {
685 return SharedRuntime::get_resolve_static_call_stub();
686 }
687
688 //-----------------------------------------------------------------------------
689 // Non-product mode code
690 #ifndef PRODUCT
691
692 void CompiledIC::verify() {
693 _call->verify();
694 if (_method->is_safe_for_ic_patching()) {
695 return;
696 } else {
697 assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
698 || is_optimized() || is_megamorphic(), "sanity check");
699 }
700 }
701
702 void CompiledIC::print() {
703 print_compiled_ic();
704 tty->cr();
705 }
706
// Print this inline cache: its address, destination, and cached value.
// NOTE(review): the tty->print statement below appears truncated in this copy
// of the source (the expression ends mid-ternary) -- restore the full
// statement from upstream before relying on this function.
707 void CompiledIC::print_compiled_ic() {
708 tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
709 p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ?
710 }
711
712 void CompiledDirectStaticCall::print() {
713 tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
|
675 }
676 }
677 }
678 return NULL;
679 }
680
681 address CompiledDirectStaticCall::find_stub(bool is_aot) {
682 return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
683 }
684
685 address CompiledDirectStaticCall::resolve_call_stub() const {
686 return SharedRuntime::get_resolve_static_call_stub();
687 }
688
689 //-----------------------------------------------------------------------------
690 // Non-product mode code
691 #ifndef PRODUCT
692
693 void CompiledIC::verify() {
694 _call->verify();
695 if (UseZGC) {
696 return;
697 } else {
698 assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
699 || is_optimized() || is_megamorphic(), "sanity check");
700 }
701 }
702
703 void CompiledIC::print() {
704 print_compiled_ic();
705 tty->cr();
706 }
707
// Print this inline cache: its address, destination, and cached value.
// NOTE(review): the tty->print statement below appears truncated in this copy
// of the source (the expression ends mid-ternary) -- restore the full
// statement from upstream before relying on this function.
708 void CompiledIC::print_compiled_ic() {
709 tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
710 p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ?
711 }
712
713 void CompiledDirectStaticCall::print() {
714 tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
|