241 assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
242 assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
243
244 address entry;
245 if (call_info->call_kind() == CallInfo::itable_call) {
246 assert(bytecode == Bytecodes::_invokeinterface, "");
247 int itable_index = call_info->itable_index();
248 entry = VtableStubs::find_itable_stub(itable_index);
249 if (entry == NULL) {
250 return false;
251 }
252 #ifdef ASSERT
253 int index = call_info->resolved_method()->itable_index();
254 assert(index == itable_index, "CallInfo pre-computes this");
255 InstanceKlass* k = call_info->resolved_method()->method_holder();
256 assert(k->verify_itable_index(itable_index), "sanity check");
257 #endif //ASSERT
258 CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
259 call_info->resolved_klass(), false);
260 holder->claim();
261 InlineCacheBuffer::create_transition_stub(this, holder, entry);
262 } else {
263 assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
264 // Can be different than selected_method->vtable_index(), due to package-private etc.
265 int vtable_index = call_info->vtable_index();
266 assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
267 entry = VtableStubs::find_vtable_stub(vtable_index);
268 if (entry == NULL) {
269 return false;
270 }
271 InlineCacheBuffer::create_transition_stub(this, NULL, entry);
272 }
273
274 if (TraceICs) {
275 ResourceMark rm;
276 assert(!call_info->selected_method().is_null(), "Unexpected null selected method");
277 tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
278 p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
279 }
280
281 // We can't check this anymore. With lazy deopt we could have already
282 // cleaned this IC entry before we even return. This is possible if
283 // we ran out of space in the inline cache buffer trying to do the
284 // set_next and we safepointed to free up space. This is a benign
285 // race because the IC entry was complete when we safepointed so
286 // cleaning it immediately is harmless.
287 // assert(is_megamorphic(), "sanity check");
288 return true;
289 }
290
|
241 assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
242 assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
243
244 address entry;
245 if (call_info->call_kind() == CallInfo::itable_call) {
246 assert(bytecode == Bytecodes::_invokeinterface, "");
247 int itable_index = call_info->itable_index();
248 entry = VtableStubs::find_itable_stub(itable_index);
249 if (entry == NULL) {
250 return false;
251 }
252 #ifdef ASSERT
253 int index = call_info->resolved_method()->itable_index();
254 assert(index == itable_index, "CallInfo pre-computes this");
255 InstanceKlass* k = call_info->resolved_method()->method_holder();
256 assert(k->verify_itable_index(itable_index), "sanity check");
257 #endif //ASSERT
258 CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
259 call_info->resolved_klass(), false);
260 holder->claim();
261 if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
262 delete holder;
263 return false;
264 }
265 } else {
266 assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
267 // Can be different than selected_method->vtable_index(), due to package-private etc.
268 int vtable_index = call_info->vtable_index();
269 assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
270 entry = VtableStubs::find_vtable_stub(vtable_index);
271 if (entry == NULL) {
272 return false;
273 }
274 if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
275 return false;
276 }
277 }
278
279 if (TraceICs) {
280 ResourceMark rm;
281 assert(!call_info->selected_method().is_null(), "Unexpected null selected method");
282 tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
283 p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
284 }
285
286 // We can't check this anymore. With lazy deopt we could have already
287 // cleaned this IC entry before we even return. This is possible if
288 // we ran out of space in the inline cache buffer trying to do the
289 // set_next and we safepointed to free up space. This is a benign
290 // race because the IC entry was complete when we safepointed so
291 // cleaning it immediately is harmless.
292 // assert(is_megamorphic(), "sanity check");
293 return true;
294 }
295
|
332 if (!is_optimized()) {
333 // must use unsafe because the destination can be a zombie (and we're cleaning)
334 // and the print_compiled_ic code wants to know if site (in the non-zombie)
335 // is to the interpreter.
336 CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
337 is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
338 assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
339 } else {
340 // Check if we are calling into our own codeblob (i.e., to a stub)
341 address dest = ic_destination();
342 #ifdef ASSERT
343 {
344 _call->verify_resolve_call(dest);
345 }
346 #endif /* ASSERT */
347 is_call_to_interpreted = _call->is_call_to_interpreted(dest);
348 }
349 return is_call_to_interpreted;
350 }
351
// Reset this inline cache to the "clean" state: repoint the call at the
// runtime resolve stub (and drop any cached value) so the next invocation
// re-resolves the callee. 'in_use' says whether the owning method is still
// live; a not-in-use (e.g. zombie) site can always be patched directly.
352 void CompiledIC::set_to_clean(bool in_use) {
353 assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
354 if (TraceInlineCacheClearing || TraceICs) {
355 tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
356 print();
357 }
358
359 address entry = _call->get_resolve_call_stub(is_optimized());
360
361 // A zombie transition will always be safe, since the metadata has already been set to NULL, so
362 // we only need to patch the destination
363 bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || CompiledICLocker::is_safe(_method);
364
365 if (safe_transition) {
366 // Kill any leftover stub we might have too
367 clear_ic_stub();
368 if (is_optimized()) {
369 set_ic_destination(entry);
370 } else {
371 set_ic_destination_and_value(entry, (void*)NULL);
372 }
373 } else {
// NOTE(review): the result of create_transition_stub is ignored here, so a
// failed stub allocation in the inline-cache buffer is not reported to the
// caller (contrast with the bool-returning variant of this function).
374 // Unsafe transition - create stub.
375 InlineCacheBuffer::create_transition_stub(this, NULL, entry);
376 }
377 // We can't check this anymore. With lazy deopt we could have already
378 // cleaned this IC entry before we even return. This is possible if
379 // we ran out of space in the inline cache buffer trying to do the
380 // set_next and we safepointed to free up space. This is a benign
381 // race because the IC entry was complete when we safepointed so
382 // cleaning it immediately is harmless.
383 // assert(is_clean(), "sanity check");
384 }
385
// An IC is "clean" when its destination is the resolve-call stub; a clean
// non-optimized IC must also have no cached value (checked by the assert).
386 bool CompiledIC::is_clean() const {
387 assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
388 bool is_clean = false;
389 address dest = ic_destination();
390 is_clean = dest == _call->get_resolve_call_stub(is_optimized());
391 assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
392 return is_clean;
393 }
394
// Transition this IC to the monomorphic state described by 'info': either a
// call into the interpreter/AOT code (directly, or via an icholder transition
// stub) or a call to compiled code (patched directly when MT-safe, otherwise
// via a transition stub).
395 void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
396 assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
397 // Updating a cache to the wrong entry can cause bugs that are very hard
398 // to track down - if cache entry gets invalid - we just clean it. In
399 // this way it is always the same code path that is responsible for
400 // updating and resolving an inline cache
401 //
402 // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
403 // callsites. In addition ic_miss code will update a site to monomorphic if it determines
404 // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
405 //
406 // In both of these cases the only thing being modified is the jump/call target and these
407 // transitions are mt_safe
408
409 Thread *thread = Thread::current();
410 if (info.to_interpreter() || info.to_aot()) {
411 // Call to interpreter
412 if (info.is_optimized() && is_optimized()) {
413 assert(is_clean(), "unsafe IC path");
414 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
415 // the call analysis (callee structure) specifies that the call is optimized
416 // (either because of CHA or the static target is final)
417 // At code generation time, this call has been emitted as static call
418 // Call via stub
419 assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
420 methodHandle method (thread, (Method*)info.cached_metadata());
421 _call->set_to_interpreted(method, info);
422
423 if (TraceICs) {
424 ResourceMark rm(thread);
425 tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
426 p2i(instruction_address()),
427 (info.to_aot() ? "aot" : "interpreter"),
428 method->print_value_string());
429 }
430 } else {
// NOTE(review): create_transition_stub's result is ignored; if stub
// allocation fails the claimed icholder is not released on this path
// (contrast with the bool-returning variant of this function).
431 // Call via method-klass-holder
432 InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
433 if (TraceICs) {
434 ResourceMark rm(thread);
435 tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
436 }
437 }
438 } else {
439 // Call to compiled code
440 bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
441 #ifdef ASSERT
442 CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
443 assert (cb != NULL && cb->is_compiled(), "must be compiled!");
444 #endif /* ASSERT */
445
446 // This is MT safe if we come from a clean-cache and go through a
447 // non-verified entry point
448 bool safe = SafepointSynchronize::is_at_safepoint() ||
449 (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));
450
451 if (!safe) {
452 InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
453 } else {
454 if (is_optimized()) {
455 set_ic_destination(info.entry());
456 } else {
457 set_ic_destination_and_value(info.entry(), info.cached_metadata());
458 }
459 }
460
461 if (TraceICs) {
462 ResourceMark rm(thread);
463 assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
464 tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
465 p2i(instruction_address()),
466 ((Klass*)info.cached_metadata())->print_value_string(),
467 (safe) ? "" : "via stub");
468 }
469 }
470 // We can't check this anymore. With lazy deopt we could have already
471 // cleaned this IC entry before we even return. This is possible if
472 // we ran out of space in the inline cache buffer trying to do the
473 // set_next and we safepointed to free up space. This is a benign
474 // race because the IC entry was complete when we safepointed so
475 // cleaning it immediately is harmless.
476 // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
477 }
478
479
480 // is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
481 // static_bound: The call can be static bound. If it isn't also optimized, the property
482 // wasn't provable at time of compilation. An optimized call will have any necessary
483 // null check, while a static_bound won't. A static_bound (but not optimized) must
484 // therefore use the unverified entry point.
485 void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
486 Klass* receiver_klass,
487 bool is_optimized,
488 bool static_bound,
489 bool caller_is_nmethod,
490 CompiledICInfo& info,
491 TRAPS) {
492 CompiledMethod* method_code = method->code();
493
494 address entry = NULL;
495 if (method_code != NULL && method_code->is_in_use()) {
|
337 if (!is_optimized()) {
338 // must use unsafe because the destination can be a zombie (and we're cleaning)
339 // and the print_compiled_ic code wants to know if site (in the non-zombie)
340 // is to the interpreter.
341 CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
342 is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
343 assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
344 } else {
345 // Check if we are calling into our own codeblob (i.e., to a stub)
346 address dest = ic_destination();
347 #ifdef ASSERT
348 {
349 _call->verify_resolve_call(dest);
350 }
351 #endif /* ASSERT */
352 is_call_to_interpreted = _call->is_call_to_interpreted(dest);
353 }
354 return is_call_to_interpreted;
355 }
356
// Reset this inline cache to the "clean" state: repoint the call at the
// runtime resolve stub (and drop any cached value) so the next invocation
// re-resolves the callee. Returns false only when an unsafe transition is
// required and allocating a transition stub in the inline-cache buffer
// fails; callers must then retry later.
357 bool CompiledIC::set_to_clean(bool in_use) {
358 assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
359 if (TraceInlineCacheClearing || TraceICs) {
360 tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
361 print();
362 }
363
364 address entry = _call->get_resolve_call_stub(is_optimized());
365
366 // A zombie transition will always be safe, since the metadata has already been set to NULL, so
367 // we only need to patch the destination
368 bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || CompiledICLocker::is_safe(_method);
369
370 if (safe_transition) {
371 // Kill any leftover stub we might have too
372 clear_ic_stub();
373 if (is_optimized()) {
374 set_ic_destination(entry);
375 } else {
376 set_ic_destination_and_value(entry, (void*)NULL);
377 }
378 } else {
379 // Unsafe transition - create stub.
380 if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
381 return false;
382 }
383 }
384 // We can't check this anymore. With lazy deopt we could have already
385 // cleaned this IC entry before we even return. This is possible if
386 // we ran out of space in the inline cache buffer trying to do the
387 // set_next and we safepointed to free up space. This is a benign
388 // race because the IC entry was complete when we safepointed so
389 // cleaning it immediately is harmless.
390 // assert(is_clean(), "sanity check");
391 return true;
392 }
393
// An IC is "clean" when its destination is the resolve-call stub; a clean
// non-optimized IC must also have no cached value (checked by the assert).
394 bool CompiledIC::is_clean() const {
395 assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
396 bool is_clean = false;
397 address dest = ic_destination();
398 is_clean = dest == _call->get_resolve_call_stub(is_optimized());
399 assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
400 return is_clean;
401 }
402
// Transition this IC to the monomorphic state described by 'info'. Returns
// false when a required transition stub cannot be allocated in the
// inline-cache buffer (the claimed icholder, if any, is freed before
// returning so it does not leak); callers must then retry later.
403 bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
404 assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
405 // Updating a cache to the wrong entry can cause bugs that are very hard
406 // to track down - if cache entry gets invalid - we just clean it. In
407 // this way it is always the same code path that is responsible for
408 // updating and resolving an inline cache
409 //
410 // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
411 // callsites. In addition ic_miss code will update a site to monomorphic if it determines
412 // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
413 //
414 // In both of these cases the only thing being modified is the jump/call target and these
415 // transitions are mt_safe
416
417 Thread *thread = Thread::current();
418 if (info.to_interpreter() || info.to_aot()) {
419 // Call to interpreter
420 if (info.is_optimized() && is_optimized()) {
421 assert(is_clean(), "unsafe IC path");
422 MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
423 // the call analysis (callee structure) specifies that the call is optimized
424 // (either because of CHA or the static target is final)
425 // At code generation time, this call has been emitted as static call
426 // Call via stub
427 assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
428 methodHandle method (thread, (Method*)info.cached_metadata());
429 _call->set_to_interpreted(method, info);
430
431 if (TraceICs) {
432 ResourceMark rm(thread);
433 tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
434 p2i(instruction_address()),
435 (info.to_aot() ? "aot" : "interpreter"),
436 method->print_value_string());
437 }
438 } else {
439 // Call via method-klass-holder
440 CompiledICHolder* holder = info.claim_cached_icholder();
441 if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
// Stub allocation failed: free the holder we claimed so it is not leaked.
442 delete holder;
443 return false;
444 }
445 if (TraceICs) {
446 ResourceMark rm(thread);
447 tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
448 }
449 }
450 } else {
451 // Call to compiled code
452 bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
453 #ifdef ASSERT
454 CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
455 assert (cb != NULL && cb->is_compiled(), "must be compiled!");
456 #endif /* ASSERT */
457
458 // This is MT safe if we come from a clean-cache and go through a
459 // non-verified entry point
460 bool safe = SafepointSynchronize::is_at_safepoint() ||
461 (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));
462
463 if (!safe) {
464 if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
465 return false;
466 }
467 } else {
468 if (is_optimized()) {
469 set_ic_destination(info.entry());
470 } else {
471 set_ic_destination_and_value(info.entry(), info.cached_metadata());
472 }
473 }
474
475 if (TraceICs) {
476 ResourceMark rm(thread);
477 assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
478 tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
479 p2i(instruction_address()),
480 ((Klass*)info.cached_metadata())->print_value_string(),
481 (safe) ? "" : "via stub");
482 }
483 }
484 // We can't check this anymore. With lazy deopt we could have already
485 // cleaned this IC entry before we even return. This is possible if
486 // we ran out of space in the inline cache buffer trying to do the
487 // set_next and we safepointed to free up space. This is a benign
488 // race because the IC entry was complete when we safepointed so
489 // cleaning it immediately is harmless.
490 // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
491 return true;
492 }
493
494
495 // is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
496 // static_bound: The call can be static bound. If it isn't also optimized, the property
497 // wasn't provable at time of compilation. An optimized call will have any necessary
498 // null check, while a static_bound won't. A static_bound (but not optimized) must
499 // therefore use the unverified entry point.
500 void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
501 Klass* receiver_klass,
502 bool is_optimized,
503 bool static_bound,
504 bool caller_is_nmethod,
505 CompiledICInfo& info,
506 TRAPS) {
507 CompiledMethod* method_code = method->code();
508
509 address entry = NULL;
510 if (method_code != NULL && method_code->is_in_use()) {
|
557
// Returns true if the destination currently installed at this virtual call
// site is an icholder entry (presumably a transition/interpreter path that
// carries a CompiledICHolder — semantics live in is_icholder_entry).
558 bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
559 // This call site might have become stale so inspect it carefully.
560 address dest = cm->call_wrapper_at(call_site->addr())->destination();
561 return is_icholder_entry(dest);
562 }
563
564 // Release the CompiledICHolder* associated with this call site if there is one.
565 void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
566 assert(cm->is_nmethod(), "must be nmethod");
567 // This call site might have become stale so inspect it carefully.
568 NativeCall* call = nativeCall_at(call_site->addr());
569 if (is_icholder_entry(call->destination())) {
// The holder is queued for deferred release rather than freed immediately.
570 NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
571 InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
572 }
573 }
574
575 // ----------------------------------------------------------------------------
576
// Reset this static call site to the resolve stub so the next invocation
// re-resolves the callee. Only the call destination is patched here; the
// associated stub is cleared by the caller (see comment below).
577 void CompiledStaticCall::set_to_clean(bool in_use) {
578 // in_use is unused but needed to match template function in CompiledMethod
579 assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
580 // Reset call site
581 MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
582 set_destination_mt_safe(resolve_call_stub());
583
584 // Do not reset stub here: It is too expensive to call find_stub.
585 // Instead, rely on caller (nmethod::clear_inline_caches) to clear
586 // both the call and its stub.
587 }
588
// A static call is clean when it still targets the resolve stub.
589 bool CompiledStaticCall::is_clean() const {
590 return destination() == resolve_call_stub();
591 }
592
// A call into the code cache is treated as a call to compiled code.
593 bool CompiledStaticCall::is_call_to_compiled() const {
594 return CodeCache::contains(destination());
595 }
596
597 bool CompiledDirectStaticCall::is_call_to_interpreted() const {
598 // It is a call to interpreted, if it calls to a stub. Hence, the destination
599 // must be in the stub part of the nmethod that contains the call
// NOTE(review): find_compiled is assumed to succeed here (no NULL check);
// the call site is expected to lie inside a compiled method.
600 CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
601 return cm->stub_contains(destination());
602 }
603
604 bool CompiledDirectStaticCall::is_call_to_far() const {
605 // It is a call to aot method, if it calls to a stub. Hence, the destination
|
572
// Returns true if the destination currently installed at this virtual call
// site is an icholder entry (presumably a transition/interpreter path that
// carries a CompiledICHolder — semantics live in is_icholder_entry).
573 bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
574 // This call site might have become stale so inspect it carefully.
575 address dest = cm->call_wrapper_at(call_site->addr())->destination();
576 return is_icholder_entry(dest);
577 }
578
579 // Release the CompiledICHolder* associated with this call site if there is one.
580 void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
581 assert(cm->is_nmethod(), "must be nmethod");
582 // This call site might have become stale so inspect it carefully.
583 NativeCall* call = nativeCall_at(call_site->addr());
584 if (is_icholder_entry(call->destination())) {
// The holder is queued for deferred release rather than freed immediately.
585 NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
586 InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
587 }
588 }
589
590 // ----------------------------------------------------------------------------
591
// Reset this static call site to the resolve stub so the next invocation
// re-resolves the callee. Always returns true: this path patches the
// destination directly and never allocates a transition stub; the bool
// return only matches the template interface used by CompiledMethod.
592 bool CompiledStaticCall::set_to_clean(bool in_use) {
593 // in_use is unused but needed to match template function in CompiledMethod
594 assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
595 // Reset call site
596 MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
597 set_destination_mt_safe(resolve_call_stub());
598
599 // Do not reset stub here: It is too expensive to call find_stub.
600 // Instead, rely on caller (nmethod::clear_inline_caches) to clear
601 // both the call and its stub.
602 return true;
603 }
604
// A static call is clean when it still targets the resolve stub.
605 bool CompiledStaticCall::is_clean() const {
606 return destination() == resolve_call_stub();
607 }
608
// A call into the code cache is treated as a call to compiled code.
609 bool CompiledStaticCall::is_call_to_compiled() const {
610 return CodeCache::contains(destination());
611 }
612
613 bool CompiledDirectStaticCall::is_call_to_interpreted() const {
614 // It is a call to interpreted, if it calls to a stub. Hence, the destination
615 // must be in the stub part of the nmethod that contains the call
// NOTE(review): find_compiled is assumed to succeed here (no NULL check);
// the call site is expected to lie inside a compiled method.
616 CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
617 return cm->stub_contains(destination());
618 }
619
620 bool CompiledDirectStaticCall::is_call_to_far() const {
621 // It is a call to aot method, if it calls to a stub. Hence, the destination
|