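// Lock a synchronized method: build a BasicLock in the on-stack lock slot
// and try the lightweight (stack-locking) fast path before falling back to
// the runtime slow path.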
  const Register tmp = lr;

  Label slow_path_lock;
  Label lock_done;

  if (method->is_synchronized()) {
    assert(!is_critical_native, "unhandled");

    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ mov(oop_handle_reg, c_rarg1);

    // Get address of the box

    __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));

    // Load the oop from the handle
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

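    // Resolve the object through the GC access barrier before its mark word
    // is written, so the CAS below hits the canonical copy of the object.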
    __ resolve_for_write(OOP_NOT_NULL, obj_reg);

    if (UseBiasedLocking) {
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
    }

    // Load (object->mark() | 1) into swap_reg %r0
    __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
    __ orr(swap_reg, rscratch1, 1);

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ str(swap_reg, Address(lock_reg, mark_word_offset));

    // src -> dest iff dest == r0 else r0 <- dest
    { Label here;
      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
    }
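    // On success (the object was unlocked) control continues at lock_done;
    // on failure we fall through to test for a recursive stack lock.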

    // Hmm should this move to the slow path code area???

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
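
  // ... [elided: the rest of the locking path (closing the is_synchronized
  //      block), the native call, and the safepoint transition back] ...

  // Publish the updated thread state (set in the elided code) with a
  // store-release so prior memory accesses are visible before the change.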
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);
  __ bind(after_transition);

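  // If the yellow/reserved guard zone was disabled (e.g. by a stack overflow
  // during the native call), re-enable it before returning to Java.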
  Label reguard;
  Label reguard_done;
  __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
  __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
  __ br(Assembler::EQ, reguard);
  __ bind(reguard_done);

  // native result if any is live

  // Unlock
  Label unlock_done;
  Label slow_path_unlock;
  if (method->is_synchronized()) {

    // Get locked oop from the handle we passed to jni
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

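    // The object may have been moved by the GC while the native code ran;
    // resolve it again before its header is written.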
    __ resolve_for_write(OOP_NOT_NULL, obj_reg);

    Label done;

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, old_hdr, done);
    }

    // Simple recursive lock?

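    // A NULL displaced header in the lock slot marks a recursive enter, in
    // which case there is nothing to undo.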
    __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    __ cbz(rscratch1, done);

    // Must save r0 if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    // get address of the stack lock
    __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
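
    // ... [the header cmpxchg and the slow-path unlock follow in the elided
    //      remainder of this method] ...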