< prev index next >

src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp

Print this page
rev 9227 : 8143067: aarch64: guarantee failure in javac
Summary: Fix adrp going out of range during code relocation
Reviewed-by: duke


1735 
1736     // Now get the handle
1737     __ lea(c_rarg1, Address(sp, klass_offset));
1738     // and protect the arg if we must spill
1739     c_arg--;
1740   }
1741 
1742   // Change state to native (we save the return address in the thread, since it might not
1743   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1744   // points into the right code segment. It does not have to be the correct return pc.
1745   // We use the same pc/oopMap repeatedly when we call out
1746 
1747   intptr_t the_pc = (intptr_t) __ pc();
1748   oop_maps->add_gc_map(the_pc - start, map);
1749 
1750   __ set_last_Java_frame(sp, noreg, (address)the_pc, rscratch1);
1751 
1752   Label dtrace_method_entry, dtrace_method_entry_done;
1753   {
1754     unsigned long offset;
1755     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1756     __ ldrb(rscratch1, Address(rscratch1, offset));
1757     __ cbnzw(rscratch1, dtrace_method_entry);
1758     __ bind(dtrace_method_entry_done);
1759   }
1760 
1761   // RedefineClasses() tracing support for obsolete method entry
1762   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
1763     // protect the args we've loaded
1764     save_args(masm, total_c_args, c_arg, out_regs);
1765     __ mov_metadata(c_rarg1, method());
1766     __ call_VM_leaf(
1767       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1768       rthread, c_rarg1);
1769     restore_args(masm, total_c_args, c_arg, out_regs);
1770   }
1771 
1772   // Lock a synchronized method
1773 
1774   // Register definitions used by locking and unlocking
1775 


1912 
1913       // Force this write out before the read below
1914       __ dmb(Assembler::SY);
1915     } else {
1916       __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1917       __ stlrw(rscratch1, rscratch2);
1918 
1919       // Write serialization page so VM thread can do a pseudo remote membar.
1920       // We use the current thread pointer to calculate a thread specific
1921       // offset to write to within the page. This minimizes bus traffic
1922       // due to cache line collision.
1923       __ serialize_memory(rthread, r2);
1924     }
1925   }
1926 
1927   // check for safepoint operation in progress and/or pending suspend requests
1928   Label safepoint_in_progress, safepoint_in_progress_done;
1929   {
1930     assert(SafepointSynchronize::_not_synchronized == 0, "fix this code");
1931     unsigned long offset;
1932     __ adrp(rscratch1,
1933             ExternalAddress((address)SafepointSynchronize::address_of_state()),
1934             offset);
1935     __ ldrw(rscratch1, Address(rscratch1, offset));
1936     __ cbnzw(rscratch1, safepoint_in_progress);
1937     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1938     __ cbnzw(rscratch1, safepoint_in_progress);
1939     __ bind(safepoint_in_progress_done);
1940   }
1941 
1942   // change thread state
1943   Label after_transition;
1944   __ mov(rscratch1, _thread_in_Java);
1945   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1946   __ stlrw(rscratch1, rscratch2);
1947   __ bind(after_transition);
1948 
1949   Label reguard;
1950   Label reguard_done;
1951   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1952   __ cmpw(rscratch1, JavaThread::stack_guard_yellow_disabled);


1985     //  get old displaced header
1986     __ ldr(old_hdr, Address(r0, 0));
1987 
1988     // Atomic swap old header if oop still contains the stack lock
1989     Label succeed;
1990     __ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1991     __ bind(succeed);
1992 
1993     // slow path re-enters here
1994     __ bind(unlock_done);
1995     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1996       restore_native_result(masm, ret_type, stack_slots);
1997     }
1998 
1999     __ bind(done);
2000   }
2001 
2002   Label dtrace_method_exit, dtrace_method_exit_done;
2003   {
2004     unsigned long offset;
2005     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2006     __ ldrb(rscratch1, Address(rscratch1, offset));
2007     __ cbnzw(rscratch1, dtrace_method_exit);
2008     __ bind(dtrace_method_exit_done);
2009   }
2010 
2011   __ reset_last_Java_frame(false, true);
2012 
2013   // Unpack oop result
2014   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2015       Label L;
2016       __ cbz(r0, L);
2017       __ ldr(r0, Address(r0, 0));
2018       __ bind(L);
2019       __ verify_oop(r0);
2020   }
2021 
2022   if (!is_critical_native) {
2023     // reset handle block
2024     __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2025     __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));




1735 
1736     // Now get the handle
1737     __ lea(c_rarg1, Address(sp, klass_offset));
1738     // and protect the arg if we must spill
1739     c_arg--;
1740   }
1741 
1742   // Change state to native (we save the return address in the thread, since it might not
1743   // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1744   // points into the right code segment. It does not have to be the correct return pc.
1745   // We use the same pc/oopMap repeatedly when we call out
1746 
1747   intptr_t the_pc = (intptr_t) __ pc();
1748   oop_maps->add_gc_map(the_pc - start, map);
1749 
1750   __ set_last_Java_frame(sp, noreg, (address)the_pc, rscratch1);
1751 
1752   Label dtrace_method_entry, dtrace_method_entry_done;
1753   {
1754     unsigned long offset;
1755     __ far_adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
1756     __ ldrb(rscratch1, Address(rscratch1, offset));
1757     __ cbnzw(rscratch1, dtrace_method_entry);
1758     __ bind(dtrace_method_entry_done);
1759   }
1760 
1761   // RedefineClasses() tracing support for obsolete method entry
1762   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
1763     // protect the args we've loaded
1764     save_args(masm, total_c_args, c_arg, out_regs);
1765     __ mov_metadata(c_rarg1, method());
1766     __ call_VM_leaf(
1767       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1768       rthread, c_rarg1);
1769     restore_args(masm, total_c_args, c_arg, out_regs);
1770   }
1771 
1772   // Lock a synchronized method
1773 
1774   // Register definitions used by locking and unlocking
1775 


1912 
1913       // Force this write out before the read below
1914       __ dmb(Assembler::SY);
1915     } else {
1916       __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1917       __ stlrw(rscratch1, rscratch2);
1918 
1919       // Write serialization page so VM thread can do a pseudo remote membar.
1920       // We use the current thread pointer to calculate a thread specific
1921       // offset to write to within the page. This minimizes bus traffic
1922       // due to cache line collision.
1923       __ serialize_memory(rthread, r2);
1924     }
1925   }
1926 
1927   // check for safepoint operation in progress and/or pending suspend requests
1928   Label safepoint_in_progress, safepoint_in_progress_done;
1929   {
1930     assert(SafepointSynchronize::_not_synchronized == 0, "fix this code");
1931     unsigned long offset;
1932     __ far_adrp(rscratch1,
1933             ExternalAddress((address)SafepointSynchronize::address_of_state()),
1934             offset);
1935     __ ldrw(rscratch1, Address(rscratch1, offset));
1936     __ cbnzw(rscratch1, safepoint_in_progress);
1937     __ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1938     __ cbnzw(rscratch1, safepoint_in_progress);
1939     __ bind(safepoint_in_progress_done);
1940   }
1941 
1942   // change thread state
1943   Label after_transition;
1944   __ mov(rscratch1, _thread_in_Java);
1945   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1946   __ stlrw(rscratch1, rscratch2);
1947   __ bind(after_transition);
1948 
1949   Label reguard;
1950   Label reguard_done;
1951   __ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
1952   __ cmpw(rscratch1, JavaThread::stack_guard_yellow_disabled);


1985     //  get old displaced header
1986     __ ldr(old_hdr, Address(r0, 0));
1987 
1988     // Atomic swap old header if oop still contains the stack lock
1989     Label succeed;
1990     __ cmpxchgptr(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
1991     __ bind(succeed);
1992 
1993     // slow path re-enters here
1994     __ bind(unlock_done);
1995     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1996       restore_native_result(masm, ret_type, stack_slots);
1997     }
1998 
1999     __ bind(done);
2000   }
2001 
2002   Label dtrace_method_exit, dtrace_method_exit_done;
2003   {
2004     unsigned long offset;
2005     __ far_adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
2006     __ ldrb(rscratch1, Address(rscratch1, offset));
2007     __ cbnzw(rscratch1, dtrace_method_exit);
2008     __ bind(dtrace_method_exit_done);
2009   }
2010 
2011   __ reset_last_Java_frame(false, true);
2012 
2013   // Unpack oop result
2014   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2015       Label L;
2016       __ cbz(r0, L);
2017       __ ldr(r0, Address(r0, 0));
2018       __ bind(L);
2019       __ verify_oop(r0);
2020   }
2021 
2022   if (!is_critical_native) {
2023     // reset handle block
2024     __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
2025     __ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));


< prev index next >