src/cpu/x86/vm/sharedRuntime_x86_64.cpp

rev 8961 : [mq]: diff-shenandoah.patch

*** 2444,2454 ****
      __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));

      // Load the oop from the handle
      __ movptr(obj_reg, Address(oop_handle_reg, 0));

!     if (UseBiasedLocking) {
        __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
      }

      // Load immediate 1 into swap_reg %rax
--- 2444,2454 ----
      __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));

      // Load the oop from the handle
      __ movptr(obj_reg, Address(oop_handle_reg, 0));

!     oopDesc::bs()->interpreter_read_barrier(masm, obj_reg);
      if (UseBiasedLocking) {
        __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
      }

      // Load immediate 1 into swap_reg %rax
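For context, the call added at 2444 is Shenandoah's read barrier: before the oop is used for locking, it is resolved through a Brooks-style forwarding pointer kept alongside the object, so obj_reg ends up pointing at the current copy rather than a stale from-space one. Below is a minimal, self-contained C++ model of that resolution step; the struct layout and helper names are illustrative assumptions, not HotSpot code.

    #include <cstdio>

    // Illustrative model of an object with a Brooks forwarding pointer: the
    // word before the payload points at the object's current copy (itself,
    // until the collector evacuates it).
    struct BrooksObject {
      BrooksObject* forwardee;
      int payload;
    };

    // Model of the read barrier: follow the forwarding pointer. In the diff,
    // interpreter_read_barrier plays this role for obj_reg.
    static BrooksObject* read_barrier(BrooksObject* obj) {
      return obj == nullptr ? nullptr : obj->forwardee;
    }

    int main() {
      BrooksObject from{nullptr, 41};
      from.forwardee = &from;   // not yet copied: forwardee is self-referential
      BrooksObject to{nullptr, 42};
      to.forwardee = &to;

      std::printf("%d\n", read_barrier(&from)->payload); // 41

      from.forwardee = &to;     // collector evacuated the object
      std::printf("%d\n", read_barrier(&from)->payload); // 42: the to-space copy
      return 0;
    }

The call in the diff serves the same purpose: make sure obj_reg holds the current copy of the object before biased_locking_enter touches its header.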
*** 2613,2622 ****
--- 2613,2623 ----
      Label slow_path_unlock;
      if (method->is_synchronized()) {

        // Get locked oop from the handle we passed to jni
        __ movptr(obj_reg, Address(oop_handle_reg, 0));

+       oopDesc::bs()->interpreter_read_barrier(masm, obj_reg);

        Label done;

        if (UseBiasedLocking) {
          __ biased_locking_exit(obj_reg, old_hdr, done);
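The matching barrier on the unlock path at 2613 keeps lock and unlock operating on the same copy: even if the object moves between monitor enter and exit, the header update must hit the resolved copy on both paths. A sketch of that invariant under the same illustrative model (again an assumption, not HotSpot code):

    #include <atomic>
    #include <cassert>

    struct Obj {
      Obj* forwardee;                   // Brooks forwarding pointer, as above
      std::atomic<unsigned long> mark;  // header word used by a CAS-based lock
    };

    static Obj* resolve(Obj* o) { return o == nullptr ? nullptr : o->forwardee; }

    // Both paths resolve the oop first, mirroring the two
    // interpreter_read_barrier calls in the diff.
    static bool try_lock(Obj* o, unsigned long tid) {
      o = resolve(o);                   // read barrier before the CAS
      unsigned long unlocked = 0;
      return o->mark.compare_exchange_strong(unlocked, tid);
    }

    static void unlock(Obj* o, unsigned long tid) {
      o = resolve(o);                   // same resolution on the exit path
      unsigned long expected = tid;
      bool released = o->mark.compare_exchange_strong(expected, 0);
      assert(released);
      (void)released;
    }

    int main() {
      Obj a{nullptr, {0}};
      a.forwardee = &a;
      bool locked = try_lock(&a, 1);
      assert(locked);
      (void)locked;
      unlock(&a, 1);
      return 0;
    }

Without the resolution on exit, an unlock after evacuation would CAS the header of the stale from-space copy and fail, which is why the patch adds the barrier symmetrically to both hunks.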