
src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp

rev 12121 : Use oopDesc::mark_offset_in_bytes() instead of a bare 0 to access the object mark word in assembly.
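
The change in this hunk (second listing below) replaces the bare literal 0 with oopDesc::mark_offset_in_bytes() when loading the object's mark word, and adds an assert in front of each cmpxchgptr that still addresses the object header through obj_reg directly and therefore still relies on the mark word sitting at offset 0. Below is a minimal standalone sketch of why the two spellings agree today; oopDesc_model is a simplified stand-in, not the real HotSpot class.

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in for HotSpot's oopDesc: the mark word is the first
    // field of every object header, so its offset is 0.
    struct oopDesc_model {
      uintptr_t _mark;     // mark word
      void*     _klass;    // class pointer
      static constexpr int mark_offset_in_bytes() { return offsetof(oopDesc_model, _mark); }
    };

    // The named accessor and the literal 0 denote the same offset, but the
    // accessor states the intent, e.g. Address(obj_reg, oopDesc::mark_offset_in_bytes()).
    // Code that still hard-wires offset 0 (the cmpxchgptr on obj_reg) is now
    // guarded explicitly, in the spirit of:
    static_assert(oopDesc_model::mark_offset_in_bytes() == 0,
                  "cmpxchgptr addresses the object header at offset 0");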


1807   if (method->is_synchronized()) {
1808     assert(!is_critical_native, "unhandled");
1809 
1810     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1811 
1812     // Get the handle (the 2nd argument)
1813     __ mov(oop_handle_reg, c_rarg1);
1814 
1815     // Get address of the box
1816 
1817     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1818 
1819     // Load the oop from the handle
1820     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1821 
1822     if (UseBiasedLocking) {
1823       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1824     }
1825 
1826     // Load (object->mark() | 1) into swap_reg %r0
1827     __ ldr(rscratch1, Address(obj_reg, 0));
1828     __ orr(swap_reg, rscratch1, 1);
1829 
1830     // Save (object->mark() | 1) into BasicLock's displaced header
1831     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1832 
1833     // src -> dest iff dest == r0 else r0 <- dest
1834     { Label here;

1835       __ cmpxchgptr(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1836     }
1837 
1838     // Hmm should this move to the slow path code area???
1839 
1840     // Test if the oopMark is an obvious stack pointer, i.e.,
1841     //  1) (mark & 3) == 0, and
1842     //  2) sp <= mark < sp + os::pagesize()
1843     // These 3 tests can be done by evaluating the following
1844     // expression: ((mark - sp) & (3 - os::vm_page_size())),
1845     // assuming both stack pointer and pagesize have their
1846     // least significant 2 bits clear.
1847     // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1848 
1849     __ sub(swap_reg, sp, swap_reg);
1850     __ neg(swap_reg, swap_reg);
1851     __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1852 
1853     // Save the test result, for recursive case, the result is zero
1854     __ str(swap_reg, Address(lock_reg, mark_word_offset));


1992     }
1993 
1994     // Simple recursive lock?
1995 
1996     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1997     __ cbz(rscratch1, done);
1998 
1999     // Must save r0 if it is live now because cmpxchg must use it
2000     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2001       save_native_result(masm, ret_type, stack_slots);
2002     }
2003 
2004 
2005     // get address of the stack lock
2006     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2007     //  get old displaced header
2008     __ ldr(old_hdr, Address(r0, 0));
2009 
2010     // Atomic swap old header if oop still contains the stack lock
2011     Label succeed;

2012     __ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
2013     __ bind(succeed);
2014 
2015     // slow path re-enters here
2016     __ bind(unlock_done);
2017     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2018       restore_native_result(masm, ret_type, stack_slots);
2019     }
2020 
2021     __ bind(done);
2022   }
2023 
2024   Label dtrace_method_exit, dtrace_method_exit_done;
2025   {
2026     unsigned long offset;
2027     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2028     __ ldrb(rscratch1, Address(rscratch1, offset));
2029     __ cbnzw(rscratch1, dtrace_method_exit);
2030     __ bind(dtrace_method_exit_done);
2031   }




1807   if (method->is_synchronized()) {
1808     assert(!is_critical_native, "unhandled");
1809 
1810     const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1811 
1812     // Get the handle (the 2nd argument)
1813     __ mov(oop_handle_reg, c_rarg1);
1814 
1815     // Get address of the box
1816 
1817     __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1818 
1819     // Load the oop from the handle
1820     __ ldr(obj_reg, Address(oop_handle_reg, 0));
1821 
1822     if (UseBiasedLocking) {
1823       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
1824     }
1825 
1826     // Load (object->mark() | 1) into swap_reg %r0
1827     __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1828     __ orr(swap_reg, rscratch1, 1);
1829 
1830     // Save (object->mark() | 1) into BasicLock's displaced header
1831     __ str(swap_reg, Address(lock_reg, mark_word_offset));
1832 
1833     // src -> dest iff dest == r0 else r0 <- dest
1834     { Label here;
1835       assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
1836       __ cmpxchgptr(r0, lock_reg, obj_reg, rscratch1, lock_done, /*fallthrough*/NULL);
1837     }
1838 
1839     // Hmm should this move to the slow path code area???
1840 
1841     // Test if the oopMark is an obvious stack pointer, i.e.,
1842     //  1) (mark & 3) == 0, and
1843     //  2) sp <= mark < sp + os::pagesize()
1844     // These 3 tests can be done by evaluating the following
1845     // expression: ((mark - sp) & (3 - os::vm_page_size())),
1846     // assuming both stack pointer and pagesize have their
1847     // least significant 2 bits clear.
1848     // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
1849 
1850     __ sub(swap_reg, sp, swap_reg);
1851     __ neg(swap_reg, swap_reg);
1852     __ ands(swap_reg, swap_reg, 3 - os::vm_page_size());
1853 
1854     // Save the test result, for recursive case, the result is zero
1855     __ str(swap_reg, Address(lock_reg, mark_word_offset));
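
The lock fast path above follows the displaced-header protocol: set the unlocked bit in the current mark (mark | 1), save that value into the on-stack BasicLock, then compare-and-swap the address of the BasicLock into the object's mark word. If the CAS fails because this thread already owns a stack lock on the object, the expression ((mark - sp) & (3 - os::vm_page_size())) checks in a single AND that the old mark has its low two bits clear and lies within one page above the current sp, i.e. that it points into this thread's own stack; a zero displaced header then records the recursive case. A hypothetical, self-contained C++ model of that protocol (names such as BasicLock, page_size and fast_lock mirror the code above but are not the generated code):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct BasicLock { uintptr_t displaced_header; };   // simplified model

    constexpr uintptr_t page_size = 4096;                // os::vm_page_size() analogue

    // Returns true if the lock was taken on the fast path (freshly or recursively).
    bool fast_lock(std::atomic<uintptr_t>& mark_word, BasicLock* box, uintptr_t sp) {
      // Load (mark | 1): the CAS below can only succeed while the object is
      // unlocked, i.e. while its mark word still has the "unlocked" bit set.
      uintptr_t unlocked_mark = mark_word.load() | 1;
      box->displaced_header = unlocked_mark;             // displaced header into the box

      uintptr_t expected = unlocked_mark;
      if (mark_word.compare_exchange_strong(expected, reinterpret_cast<uintptr_t>(box)))
        return true;                                     // our stack lock is installed

      // CAS failed: 'expected' now holds the current mark.  If it points into
      // our own stack page, this is a recursive lock; record it with a 0 header.
      uintptr_t diff = expected - sp;
      if ((diff & (3 - page_size)) == 0) {
        box->displaced_header = 0;
        return true;
      }
      return false;                                      // contended: the real code branches to the slow path
    }

    int main() {
      std::atomic<uintptr_t> mark{0x05};                 // some unlocked mark value (low bit set)
      BasicLock box;
      std::printf("locked: %d\n", fast_lock(mark, &box, reinterpret_cast<uintptr_t>(&box)));
    }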


1993     }
1994 
1995     // Simple recursive lock?
1996 
1997     __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
1998     __ cbz(rscratch1, done);
1999 
2000     // Must save r0 if it is live now because cmpxchg must use it
2001     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2002       save_native_result(masm, ret_type, stack_slots);
2003     }
2004 
2005 
2006     // get address of the stack lock
2007     __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
2008     //  get old displaced header
2009     __ ldr(old_hdr, Address(r0, 0));
2010 
2011     // Atomic swap old header if oop still contains the stack lock
2012     Label succeed;
2013     assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
2014     __ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
2015     __ bind(succeed);
2016 
2017     // slow path re-enters here
2018     __ bind(unlock_done);
2019     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2020       restore_native_result(masm, ret_type, stack_slots);
2021     }
2022 
2023     __ bind(done);
2024   }
2025 
2026   Label dtrace_method_exit, dtrace_method_exit_done;
2027   {
2028     unsigned long offset;
2029     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2030     __ ldrb(rscratch1, Address(rscratch1, offset));
2031     __ cbnzw(rscratch1, dtrace_method_exit);
2032     __ bind(dtrace_method_exit_done);
2033   }
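
The unlock sequence above is the mirror image: read the displaced header saved in the stack lock; if it is zero the lock was recursive and nothing has to be written back; otherwise compare-and-swap the old header back into the mark word, which only succeeds if the object still points at this stack lock (hence the new assert in front of cmpxchgptr), and fall through to slow_path_unlock otherwise. r0 has to be saved and restored around this because it carries the native result and is also needed by the cmpxchg. A matching sketch under the same simplified model as the locking example (hypothetical, not the generated code):

    #include <atomic>
    #include <cstdint>

    struct BasicLock { uintptr_t displaced_header; };    // same simplified model as above

    // Returns true on a fast-path unlock, false if the slow path (runtime call)
    // would be required.
    bool fast_unlock(std::atomic<uintptr_t>& mark_word, BasicLock* box) {
      uintptr_t old_hdr = box->displaced_header;
      if (old_hdr == 0)
        return true;                                     // simple recursive lock: nothing to undo

      // Swap the old header back only if the mark word still points at our box
      // (this mirrors the cmpxchgptr that the new assert documents).
      uintptr_t expected = reinterpret_cast<uintptr_t>(box);
      return mark_word.compare_exchange_strong(expected, old_hdr);
    }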

