
src/cpu/x86/vm/sharedRuntime_x86_64.cpp

--- old/src/cpu/x86/vm/sharedRuntime_x86_64.cpp

2429   const Register old_hdr  = r13;  // value of old header at unlock time
2430 
2431   Label slow_path_lock;
2432   Label lock_done;
2433 
2434   if (method->is_synchronized()) {
2435     assert(!is_critical_native, "unhandled");
2436 
2437 
2438     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2439 
2440     // Get the handle (the 2nd argument)
2441     __ mov(oop_handle_reg, c_rarg1);
2442 
2443     // Get address of the box
2444 
2445     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2446 
2447     // Load the oop from the handle
2448     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2449 
2450     if (UseBiasedLocking) {
2451       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2452     }
2453 
2454     // Load immediate 1 into swap_reg %rax
2455     __ movl(swap_reg, 1);
2456 
2457     // Load (object->mark() | 1) into swap_reg %rax
2458     __ orptr(swap_reg, Address(obj_reg, 0));
2459 
2460     // Save (object->mark() | 1) into BasicLock's displaced header
2461     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2462 
2463     if (os::is_MP()) {
2464       __ lock();
2465     }
2466 
2467     // src -> dest iff dest == rax else rax <- dest
2468     __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2469     __ jcc(Assembler::equal, lock_done);
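
The hunk above is the displaced-header ("stack lock") fast path for a
synchronized native wrapper: build the unlocked mark pattern (mark | 1),
stash it in the on-stack BasicLock, then try a locked cmpxchg to install
the BasicLock's address as the new mark word. A minimal standalone C++
sketch of that protocol, using hypothetical names rather than HotSpot
types:

    #include <atomic>
    #include <cstdint>

    struct BasicLockSketch { uintptr_t displaced_header; };   // on-stack "box"
    struct ObjectSketch    { std::atomic<uintptr_t> mark; };  // object header

    // Returns true on fast-path success, mirroring the jcc to lock_done.
    bool fast_lock_sketch(ObjectSketch* obj, BasicLockSketch* box) {
      // movl(swap_reg, 1); orptr(swap_reg, mark): build the unlocked pattern.
      uintptr_t unlocked = obj->mark.load(std::memory_order_relaxed) | 1;
      // movptr(box, swap_reg): save it as the displaced header for unlock.
      box->displaced_header = unlocked;
      // lock cmpxchgptr: install the box address iff the mark is still unlocked.
      uintptr_t expected = unlocked;
      return obj->mark.compare_exchange_strong(
          expected, reinterpret_cast<uintptr_t>(box));
      // On failure the real code (past this excerpt) tests for a recursive
      // stack lock and otherwise jumps to slow_path_lock.
    }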


2598 
2599   // change thread state
2600   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2601   __ bind(after_transition);
2602 
2603   Label reguard;
2604   Label reguard_done;
2605   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
2606   __ jcc(Assembler::equal, reguard);
2607   __ bind(reguard_done);
2608 
2609   // native result, if any, is live
2610 
2611   // Unlock
2612   Label unlock_done;
2613   Label slow_path_unlock;
2614   if (method->is_synchronized()) {
2615 
2616     // Get locked oop from the handle we passed to jni
2617     __ movptr(obj_reg, Address(oop_handle_reg, 0));

2618 
2619     Label done;
2620 
2621     if (UseBiasedLocking) {
2622       __ biased_locking_exit(obj_reg, old_hdr, done);
2623     }
2624 
2625     // Simple recursive lock?
2626 
2627     __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2628     __ jcc(Assembler::equal, done);
2629 
2630     // Must save rax if it is live now because cmpxchg must use it
2631     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2632       save_native_result(masm, ret_type, stack_slots);
2633     }
2634 
2635 
2636     // get address of the stack lock
2637     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
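
Two things happen at the top of the hunk above before the unlock: the
thread state is flipped back to _thread_in_Java, and the stack guard state
is tested so that a yellow zone disabled during the native call (by a stack
overflow handler) gets re-protected via the out-of-line reguard stub. A
simplified sketch of that check, with made-up names standing in for the
JavaThread fields:

    enum GuardStateSketch { guard_enabled, guard_yellow_disabled };

    struct ThreadSketch {
      int              thread_state;       // field the movl() stores into
      GuardStateSketch stack_guard_state;  // field the cmpl() tests
    };

    void return_from_native_sketch(ThreadSketch* t, int thread_in_Java) {
      t->thread_state = thread_in_Java;    // movl(..., _thread_in_Java)
      if (t->stack_guard_state == guard_yellow_disabled) {
        // jcc(equal, reguard): branch to a stub that calls into the VM to
        // re-enable the yellow guard pages, then resume at reguard_done.
      }
    }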

+++ new/src/cpu/x86/vm/sharedRuntime_x86_64.cpp

2429   const Register old_hdr  = r13;  // value of old header at unlock time
2430 
2431   Label slow_path_lock;
2432   Label lock_done;
2433 
2434   if (method->is_synchronized()) {
2435     assert(!is_critical_native, "unhandled");
2436 
2437 
2438     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2439 
2440     // Get the handle (the 2nd argument)
2441     __ mov(oop_handle_reg, c_rarg1);
2442 
2443     // Get address of the box
2444 
2445     __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2446 
2447     // Load the oop from the handle
2448     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2449     oopDesc::bs()->interpreter_write_barrier(masm, obj_reg);
2450     if (UseBiasedLocking) {
2451       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2452     }
2453 
2454     // Load immediate 1 into swap_reg %rax
2455     __ movl(swap_reg, 1);
2456 
2457     // Load (object->mark() | 1) into swap_reg %rax
2458     __ orptr(swap_reg, Address(obj_reg, 0));
2459 
2460     // Save (object->mark() | 1) into BasicLock's displaced header
2461     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2462 
2463     if (os::is_MP()) {
2464       __ lock();
2465     }
2466 
2467     // src -> dest iff dest == rax else rax <- dest
2468     __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2469     __ jcc(Assembler::equal, lock_done);
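
The only change in this hunk is the interpreter_write_barrier call added at
new line 2449 (the unlock hunk below gets the same treatment at line 2618).
Locking writes to the object's header, and Shenandoah has to make sure such
writes land on the current to-space copy of the object. A conceptual sketch
of what the emitted barrier accomplishes, assuming the Brooks
forwarding-pointer design Shenandoah used at the time (names are
illustrative, not HotSpot API):

    #include <cstdint>

    struct ForwardedObjSketch {
      ForwardedObjSketch* fwd;  // forwarding pointer; self-referential once
                                // the object lives in to-space
      uintptr_t mark;           // header word the lock code will cmpxchg
    };

    // Resolve the reference to its current copy before any header write.
    inline ForwardedObjSketch* write_barrier_sketch(ForwardedObjSketch* obj) {
      return obj->fwd;
    }

Without the resolve, the cmpxchgptr on Address(obj_reg, 0) could install
the lock into a stale from-space copy, and the lock state would be lost
when the object is evacuated.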


2598 
2599   // change thread state
2600   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2601   __ bind(after_transition);
2602 
2603   Label reguard;
2604   Label reguard_done;
2605   __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
2606   __ jcc(Assembler::equal, reguard);
2607   __ bind(reguard_done);
2608 
2609   // native result, if any, is live
2610 
2611   // Unlock
2612   Label unlock_done;
2613   Label slow_path_unlock;
2614   if (method->is_synchronized()) {
2615 
2616     // Get locked oop from the handle we passed to jni
2617     __ movptr(obj_reg, Address(oop_handle_reg, 0));
2618     oopDesc::bs()->interpreter_write_barrier(masm, obj_reg);
2619 
2620     Label done;
2621 
2622     if (UseBiasedLocking) {
2623       __ biased_locking_exit(obj_reg, old_hdr, done);
2624     }
2625 
2626     // Simple recursive lock?
2627 
2628     __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2629     __ jcc(Assembler::equal, done);
2630 
2631     // Must save rax if it is live now because cmpxchg must use it
2632     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2633       save_native_result(masm, ret_type, stack_slots);
2634     }
2635 
2636 
2637     // get address of the stack lock
2638     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
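
The unlock side mirrors the lock fast path: a zero displaced header in the
stack slot marks a recursive lock (nothing to restore), otherwise the saved
header is cmpxchg-ed back into the mark word; rax is saved first because
cmpxchg implicitly uses rax as its compare value. A standalone sketch of
that logic, again with hypothetical names:

    #include <atomic>
    #include <cstdint>

    struct BasicLockSketch { uintptr_t displaced_header; };
    struct ObjectSketch    { std::atomic<uintptr_t> mark; };

    // Returns true on fast-path success (fall-through to the done label).
    bool fast_unlock_sketch(ObjectSketch* obj, BasicLockSketch* box) {
      // cmpptr(stack slot, NULL_WORD): zero means a recursive stack lock.
      if (box->displaced_header == 0) return true;
      // Swap the saved header back iff the mark still points at the box.
      uintptr_t expected = reinterpret_cast<uintptr_t>(box);
      return obj->mark.compare_exchange_strong(expected, box->displaced_header);
      // A false return corresponds to the jump to slow_path_unlock.
    }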

