2352
2353 // Get the handle (the 2nd argument)
2354 __ mov(oop_handle_reg, c_rarg1);
2355
2356 // Get address of the box
2357
2358 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2359
2360 // Load the oop from the handle
2361 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2362
2363 oopDesc::bs()->interpreter_write_barrier(masm, obj_reg);
2364 if (UseBiasedLocking) {
2365 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2366 }
2367
2368 // Load immediate 1 into swap_reg %rax
2369 __ movl(swap_reg, 1);
2370
2371 // Load (object->mark() | 1) into swap_reg %rax
2372 __ orptr(swap_reg, Address(obj_reg, 0));
2373
2374 // Save (object->mark() | 1) into BasicLock's displaced header
2375 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2376
2377 if (os::is_MP()) {
2378 __ lock();
2379 }
2380
2381 // src -> dest iff dest == rax else rax <- dest
2382 __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2383 __ jcc(Assembler::equal, lock_done);
2384
2385 // Hmm should this move to the slow path code area???
2386
2387 // Test if the oopMark is an obvious stack pointer, i.e.,
2388 // 1) (mark & 3) == 0, and
2389 // 2) rsp <= mark < mark + os::pagesize()
2390 // These 3 tests can be done by evaluating the following
2391 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2516
2517 Label reguard;
2518 Label reguard_done;
2519 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2520 __ jcc(Assembler::equal, reguard);
2521 __ bind(reguard_done);
2522
2523 // native result if any is live
2524
2525 // Unlock
2526 Label unlock_done;
2527 Label slow_path_unlock;
2528 if (method->is_synchronized()) {
2529
2530 // Get locked oop from the handle we passed to jni
2531 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2532 oopDesc::bs()->interpreter_write_barrier(masm, obj_reg);
2533
2534 Label done;
2535
2536 if (UseBiasedLocking) {
2537 __ biased_locking_exit(obj_reg, old_hdr, done);
2538 }
2539
2540 // Simple recursive lock?
2541
2542 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2543 __ jcc(Assembler::equal, done);
2544
2545     // Must save rax if it is live now because cmpxchg must use it
2546 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2547 save_native_result(masm, ret_type, stack_slots);
2548 }
2549
2550
2551 // get address of the stack lock
2552 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2553 // get old displaced header
2554 __ movptr(old_hdr, Address(rax, 0));
2555
|
2352
2353 // Get the handle (the 2nd argument)
2354 __ mov(oop_handle_reg, c_rarg1);
2355
2356 // Get address of the box
2357
2358 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2359
2360 // Load the oop from the handle
2361 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2362
2363 oopDesc::bs()->interpreter_write_barrier(masm, obj_reg);
2364 if (UseBiasedLocking) {
2365 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2366 }
2367
2368 // Load immediate 1 into swap_reg %rax
2369 __ movl(swap_reg, 1);
2370
2371 // Load (object->mark() | 1) into swap_reg %rax
2372 __ shenandoah_store_addr_check(obj_reg); // Access mark word
2373 __ orptr(swap_reg, Address(obj_reg, 0));
2374
2375 // Save (object->mark() | 1) into BasicLock's displaced header
2376 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2377
2378 if (os::is_MP()) {
2379 __ lock();
2380 }
2381
2382 // src -> dest iff dest == rax else rax <- dest
2383 __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2384 __ jcc(Assembler::equal, lock_done);
2385
2386 // Hmm should this move to the slow path code area???
2387
2388 // Test if the oopMark is an obvious stack pointer, i.e.,
2389 // 1) (mark & 3) == 0, and
2390 // 2) rsp <= mark < mark + os::pagesize()
2391 // These 3 tests can be done by evaluating the following
2392 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2517
2518 Label reguard;
2519 Label reguard_done;
2520 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2521 __ jcc(Assembler::equal, reguard);
2522 __ bind(reguard_done);
2523
2524 // native result if any is live
2525
2526 // Unlock
2527 Label unlock_done;
2528 Label slow_path_unlock;
2529 if (method->is_synchronized()) {
2530
2531 // Get locked oop from the handle we passed to jni
2532 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2533 oopDesc::bs()->interpreter_write_barrier(masm, obj_reg);
2534
2535 Label done;
2536
2537 __ shenandoah_store_addr_check(obj_reg);
2538 if (UseBiasedLocking) {
2539 __ biased_locking_exit(obj_reg, old_hdr, done);
2540 }
2541
2542 // Simple recursive lock?
2543
2544 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2545 __ jcc(Assembler::equal, done);
2546
2548     // Must save rax if it is live now because cmpxchg must use it
2548 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2549 save_native_result(masm, ret_type, stack_slots);
2550 }
2551
2552
2553 // get address of the stack lock
2554 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2555 // get old displaced header
2556 __ movptr(old_hdr, Address(rax, 0));
2557
|