
src/hotspot/share/code/compiledIC.cpp


*** 124,134 ****
        tty->cr();
      }
  
      {
        CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
-       MutexLockerEx pl(CompiledICLocker::is_safe(cb->as_compiled_method()) ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
        assert(cb != NULL && cb->is_compiled(), "must be compiled");
        _call->set_destination_mt_safe(entry_point);
      }
  
      if (is_optimized() || is_icstub) {
--- 124,133 ----
***************
*** 364,374 ****
  
    address entry = _call->get_resolve_call_stub(is_optimized());
  
    // A zombie transition will always be safe, since the metadata has already been set to NULL, so
    // we only need to patch the destination
!   bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || CompiledICLocker::is_safe(_method);
  
    if (safe_transition) {
      // Kill any leftover stub we might have too
      clear_ic_stub();
      if (is_optimized()) {
--- 363,373 ----
  
    address entry = _call->get_resolve_call_stub(is_optimized());
  
    // A zombie transition will always be safe, since the metadata has already been set to NULL, so
    // we only need to patch the destination
!   bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
  
    if (safe_transition) {
      // Kill any leftover stub we might have too
      clear_ic_stub();
      if (is_optimized()) {
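Note: the changed line above replaces the CompiledICLocker::is_safe(_method) disjunct with an explicit safepoint check. As a minimal standalone sketch (illustration only; the parameter names below are invented, not HotSpot identifiers), the resulting predicate says the call site may be patched directly when the platform call instruction is always safe to patch concurrently, when the nmethod is no longer in use, when the call is optimized (so there is no cached metadata to keep consistent with the destination), or when all Java threads are stopped at a safepoint:

  // Illustration only; parameter names are invented for this sketch.
  bool safe_to_patch_directly(bool call_is_safe_for_patching,
                              bool nmethod_in_use,
                              bool call_is_optimized,
                              bool at_safepoint) {
    return call_is_safe_for_patching ||
           !nmethod_in_use ||
           call_is_optimized ||
           at_safepoint;
  }
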
***************
*** 417,428 ****
  
    Thread *thread = Thread::current();
    if (info.to_interpreter() || info.to_aot()) {
      // Call to interpreter
      if (info.is_optimized() && is_optimized()) {
!       assert(is_clean(), "unsafe IC path");
!       MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
        // the call analysis (callee structure) specifies that the call is optimized
        // (either because of CHA or the static target is final)
        // At code generation time, this call has been emitted as static call
        // Call via stub
        assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
--- 416,426 ----
  
    Thread *thread = Thread::current();
    if (info.to_interpreter() || info.to_aot()) {
      // Call to interpreter
      if (info.is_optimized() && is_optimized()) {
!       assert(is_clean(), "unsafe IC path");
        // the call analysis (callee structure) specifies that the call is optimized
        // (either because of CHA or the static target is final)
        // At code generation time, this call has been emitted as static call
        // Call via stub
        assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
***************
*** 592,602 ****
  
  bool CompiledStaticCall::set_to_clean(bool in_use) {
    // in_use is unused but needed to match template function in CompiledMethod
    assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
    // Reset call site
-   MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
    set_destination_mt_safe(resolve_call_stub());
  
    // Do not reset stub here: It is too expensive to call find_stub.
    // Instead, rely on caller (nmethod::clear_inline_caches) to clear
    // both the call and its stub.
--- 590,599 ----
***************
*** 638,648 ****
    set_destination_mt_safe(entry);
  }
  
  void CompiledStaticCall::set(const StaticCallInfo& info) {
    assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
-   MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
    // Updating a cache to the wrong entry can cause bugs that are very hard
    // to track down - if cache entry gets invalid - we just clean it. In
    // this way it is always the same code path that is responsible for
    // updating and resolving an inline cache
    assert(is_clean(), "do not update a call entry - use clean");
--- 635,644 ----
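Note: taken together, these hunks drop the per-site MutexLockerEx on Patching_lock and leave only the CompiledICLocker::is_safe(...) asserts, so the lock is expected to be held (or the transition otherwise known to be safe) by the caller rather than taken at every patch site. A self-contained analogy in standard C++ (not HotSpot code; every name below is invented for illustration) shows the shape of that pattern: one RAII guard taken by the caller, and patching code that merely asserts the guard is held.

  // Standalone analogy, not HotSpot code: all names are invented.
  #include <cassert>
  #include <mutex>

  static std::recursive_mutex ic_lock;        // stands in for the IC patching lock
  static thread_local int ic_lock_depth = 0;  // lets is_safe() check ownership

  struct ICLockerGuard {                      // plays the role of a caller-held locker
    ICLockerGuard()  { ic_lock.lock();  ++ic_lock_depth; }
    ~ICLockerGuard() { --ic_lock_depth; ic_lock.unlock(); }
  };

  static bool ic_locker_is_safe() { return ic_lock_depth > 0; }

  static void patch_call_site() {
    // Like the asserts kept by this change: the patching code no longer takes
    // the lock itself, it only checks that the caller already holds it.
    assert(ic_locker_is_safe() && "mt unsafe call");
    // ... patch the call destination here ...
  }

  int main() {
    ICLockerGuard guard;  // the caller, not each patch site, takes the lock
    patch_call_site();
    return 0;
  }

The upshot is that a batch of inline-cache updates runs under a single lock acquisition instead of one per call site, while debug builds still catch callers that reach the patching code without the guard.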