< prev index next >
src/hotspot/share/runtime/sharedRuntime.cpp
Print this page
@@ -2035,11 +2035,12 @@
JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
// Disable ObjectSynchronizer::quick_enter() in default config
// on AARCH64 and ARM until JDK-8153107 is resolved.
if (ARM_ONLY((SyncFlags & 256) != 0 &&)
AARCH64_ONLY((SyncFlags & 256) != 0 &&)
- !SafepointSynchronize::is_synchronizing()) {
+ !SafepointSynchronize::is_synchronizing() &&
+ (!EnableValhalla || !_obj->klass()->is_value())) {
// Only try quick_enter() if we're not trying to reach a safepoint
// so that the calling thread reaches the safepoint more quickly.
if (ObjectSynchronizer::quick_enter(_obj, thread, lock)) return;
}
// NO_ASYNC required because an async exception on the state transition destructor
@@ -2052,15 +2053,21 @@
Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
}
Handle h_obj(THREAD, obj);
if (UseBiasedLocking) {
// Retry fast entry if bias is revoked to avoid unnecessary inflation
- ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
+ ObjectSynchronizer::fast_enter(h_obj, lock, true, THREAD);
} else {
- ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
+ ObjectSynchronizer::slow_enter(h_obj, lock, THREAD);
+ }
+ if (HAS_PENDING_EXCEPTION) {
+ // Deoptimize the (compiled) caller frame
+ assert(EnableValhalla && h_obj()->klass()->is_value(), "must be a value type");
+ RegisterMap reg_map(thread);
+ frame caller_frame = thread->last_frame().sender(&reg_map);
+ Deoptimization::deoptimize_frame(thread, caller_frame.id());
}
- assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
JRT_BLOCK_END
JRT_END
// Handles the uncommon cases of monitor unlocking in compiled code
JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock, JavaThread * THREAD))
< prev index next >