    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();

    // Get the handle (the 2nd argument)
    __ mov(oop_handle_reg, c_rarg1);

    // Get address of the box

    __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));

    // Load the oop from the handle
    __ ldr(obj_reg, Address(oop_handle_reg, 0));

    oopDesc::bs()->interpreter_write_barrier(masm, obj_reg);
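    // Shenandoah: the object may have been evacuated, so the write barrier
    // resolves obj_reg to its (possibly forwarded) to-space copy; the
    // mark-word CAS below must operate on that canonical copy.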

    if (UseBiasedLocking) {
      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
    }

    // Load (object->mark() | 1) into swap_reg r0
    __ shenandoah_store_addr_check(obj_reg); // Access mark word
    __ ldr(rscratch1, Address(obj_reg, 0));
    __ orr(swap_reg, rscratch1, 1);
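    // Setting the low bit makes the expected value look like an unlocked
    // mark word, so the CAS below can only succeed while the object is
    // actually unlocked.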

    // Save (object->mark() | 1) into BasicLock's displaced header
    __ str(swap_reg, Address(lock_reg, mark_word_offset));

    // src -> dest iff dest == r0 else r0 <- dest
    { Label here;
      __ cmpxchgptr(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
    }
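    // If the CAS installed lock_reg (the BasicLock address) into the mark
    // word, we own the lock and have branched to lock_done. Otherwise r0
    // now holds the observed mark word and we fall through to the
    // stack-pointer (recursive lock) test described below.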

    // Hmm should this move to the slow path code area???

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 3) == 0, and
    //  2) sp <= mark < sp + os::pagesize()
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - sp) & (3 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 2 bits clear.
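    //
    // A worked instance, assuming a 4K page: 3 - 4096 == 0x...FFFFF003,
    // i.e. a mask covering the two low bits plus every bit at or above the
    // page size. ((mark - sp) & 0x...FFFFF003) == 0 therefore holds exactly
    // when (mark - sp) has its low 2 bits clear (so mark & 3 == 0, given sp
    // is at least 4-byte aligned) and 0 <= mark - sp < 4096 (so
    // sp <= mark < sp + 4K): the mark word is a displaced header pointing
    // into our own frame, which means a recursive lock.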

  // ... (intervening code elided) ...

  Label reguard;
  Label reguard_done;
  __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
  __ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
  __ br(Assembler::EQ, reguard);
  __ bind(reguard_done);
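  // If a stack overflow in the native code disabled the yellow guard zone,
  // jump to the out-of-line reguard stub to re-protect it before returning
  // to Java; it branches back to reguard_done when finished.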

  // native result if any is live

  // Unlock
  Label unlock_done;
  Label slow_path_unlock;
  if (method->is_synchronized()) {

    // Get locked oop from the handle we passed to jni
    __ ldr(obj_reg, Address(oop_handle_reg, 0));
    oopDesc::bs()->interpreter_write_barrier(masm, obj_reg);

    Label done;

    __ shenandoah_store_addr_check(obj_reg);
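    // Shenandoah sanity check: verifies that obj_reg is a valid to-space
    // address before the mark word is read or updated below.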

    if (UseBiasedLocking) {
      __ biased_locking_exit(obj_reg, old_hdr, done);
    }

    // Simple recursive lock?

    __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    __ cbz(rscratch1, done);
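    // A zero displaced header in the BasicLock marks a recursive enter:
    // there is nothing to restore, so the unlock is already complete.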

    // Must save r0 if it is live now because cmpxchg must use it
    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
      save_native_result(masm, ret_type, stack_slots);
    }

    // get address of the stack lock
    __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
    // get old displaced header
    __ ldr(old_hdr, Address(r0, 0));