src/hotspot/cpu/arm/templateTable_arm.cpp

*** 56,68 ****
  static inline Address iaddress(int n) {
    return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
  }
  
  static inline Address laddress(int n) { return iaddress(n + 1); }
- #ifndef AARCH64
  static inline Address haddress(int n) { return iaddress(n + 0); }
- #endif // !AARCH64
  
  static inline Address faddress(int n) { return iaddress(n); }
  static inline Address daddress(int n) { return laddress(n); }
  static inline Address aaddress(int n) { return iaddress(n); }
--- 56,66 ----
*** 70,85 ****
  void TemplateTable::get_local_base_addr(Register r, Register index) {
    __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
  }
  
  Address TemplateTable::load_iaddress(Register index, Register scratch) {
- #ifdef AARCH64
-   get_local_base_addr(scratch, index);
-   return Address(scratch);
- #else
    return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
- #endif // AARCH64
  }
  
  Address TemplateTable::load_aaddress(Register index, Register scratch) {
    return load_iaddress(index, scratch);
  }
--- 68,78 ----
*** 111,159 **** static inline Address at_tos_p2() { return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2)); } - // 32-bit ARM: // Loads double/long local into R0_tos_lo/R1_tos_hi with two // separate ldr instructions (supports nonadjacent values). // Used for longs in all modes, and for doubles in SOFTFP mode. - // - // AArch64: loads long local into R0_tos. - // void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) { const Register Rlocal_base = tmp; assert_different_registers(Rlocal_index, tmp); get_local_base_addr(Rlocal_base, Rlocal_index); - #ifdef AARCH64 - __ ldr(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1))); - #else __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1))); __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0))); - #endif // AARCH64 } - // 32-bit ARM: // Stores R0_tos_lo/R1_tos_hi to double/long local with two // separate str instructions (supports nonadjacent values). // Used for longs in all modes, and for doubles in SOFTFP mode - // - // AArch64: stores R0_tos to long local. - // void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) { const Register Rlocal_base = tmp; assert_different_registers(Rlocal_index, tmp); get_local_base_addr(Rlocal_base, Rlocal_index); - #ifdef AARCH64 - __ str(R0_tos, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1))); - #else __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1))); __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0))); - #endif // AARCH64 } // Returns address of Java array element using temp register as address base. Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) { int logElemSize = exact_log2(type2aelembytes(elemType)); --- 104,136 ----
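
Note on the two-word representation used above: on 32-bit ARM a long (or SOFTFP double) occupies two interpreter slots and is carried in the R0_tos_lo/R1_tos_hi pair, which is why the retained path issues two independent ldr/str instructions against possibly nonadjacent slots. A minimal sketch in plain C++ (illustrative only, not HotSpot code; the slot layout simply mirrors what haddress/laddress express above):

    #include <cstdint>
    #include <cassert>

    int main() {
      uint64_t value = 0x0123456789abcdefULL;

      // Split into the "lo"/"hi" words (R0_tos_lo / R1_tos_hi in the code above).
      uint32_t lo = static_cast<uint32_t>(value);        // low 32 bits
      uint32_t hi = static_cast<uint32_t>(value >> 32);  // high 32 bits

      // The two words live in separate interpreter slots, hence two independent
      // memory accesses rather than one 64-bit access.
      uint32_t slots[2] = { hi, lo };   // slot 0 = hi, slot 1 = lo (as in haddress/laddress)

      // Recombine to show the mapping is lossless.
      uint64_t joined = (static_cast<uint64_t>(slots[0]) << 32) | slots[1];
      assert(joined == value);
      return 0;
    }
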
*** 190,200 ****
  
  //----------------------------------------------------------------------------------------------------
  // Miscelaneous helper routines
  
  // Store an oop (or NULL) at the address described by obj.
! // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
  // Also destroys new_val and obj.base().
  static void do_oop_store(InterpreterMacroAssembler* _masm,
                           Address obj,
                           Register new_val,
                           Register tmp1,
--- 167,177 ----
  
  //----------------------------------------------------------------------------------------------------
  // Miscelaneous helper routines
  
  // Store an oop (or NULL) at the address described by obj.
! // Blows all volatile registers R0-R3, Rtemp, LR).
  // Also destroys new_val and obj.base().
  static void do_oop_store(InterpreterMacroAssembler* _masm,
                           Address obj,
                           Register new_val,
                           Register tmp1,
*** 222,232 ****
    assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
    return Address(Rbcp, offset);
  }
  
! // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR.
  void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                     Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                     int byte_no) {
    assert_different_registers(bc_reg, temp_reg);
    if (!RewriteBytecodes) return;
--- 199,209 ----
    assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
    return Address(Rbcp, offset);
  }
  
! // Blows volatile registers R0-R3, Rtemp, LR.
  void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                     Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                     int byte_no) {
    assert_different_registers(bc_reg, temp_reg);
    if (!RewriteBytecodes) return;
*** 325,350 **** void TemplateTable::lconst(int value) { transition(vtos, ltos); assert((value == 0) || (value == 1), "unexpected long constant"); __ mov(R0_tos, value); - #ifndef AARCH64 __ mov(R1_tos_hi, 0); - #endif // !AARCH64 } void TemplateTable::fconst(int value) { transition(vtos, ftos); - #ifdef AARCH64 - switch(value) { - case 0: __ fmov_sw(S0_tos, ZR); break; - case 1: __ fmov_s (S0_tos, 0x70); break; - case 2: __ fmov_s (S0_tos, 0x00); break; - default: ShouldNotReachHere(); break; - } - #else const int zero = 0; // 0.0f const int one = 0x3f800000; // 1.0f const int two = 0x40000000; // 2.0f switch(value) { --- 302,317 ----
*** 355,377 **** } #ifndef __SOFTFP__ __ fmsr(S0_tos, R0_tos); #endif // !__SOFTFP__ - #endif // AARCH64 } void TemplateTable::dconst(int value) { transition(vtos, dtos); - #ifdef AARCH64 - switch(value) { - case 0: __ fmov_dx(D0_tos, ZR); break; - case 1: __ fmov_d (D0_tos, 0x70); break; - default: ShouldNotReachHere(); break; - } - #else const int one_lo = 0; // low part of 1.0 const int one_hi = 0x3ff00000; // high part of 1.0 if (value == 0) { #ifdef __SOFTFP__ --- 322,336 ----
*** 388,398 **** __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi); #endif // !__SOFTFP__ } else { ShouldNotReachHere(); } - #endif // AARCH64 } void TemplateTable::bipush() { transition(vtos, itos); --- 347,356 ----
*** 427,455 **** const int base_offset = ConstantPool::header_size() * wordSize; const int tags_offset = Array<u1>::base_offset_in_bytes(); // get const type __ add(Rtemp, Rtags, tags_offset); - #ifdef AARCH64 - __ add(Rtemp, Rtemp, Rindex); - __ ldarb(RtagType, Rtemp); // TODO-AARCH64 figure out if barrier is needed here, or control dependency is enough - #else __ ldrb(RtagType, Address(Rtemp, Rindex)); volatile_barrier(MacroAssembler::LoadLoad, Rtemp); - #endif // AARCH64 // unresolved class - get the resolved class __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass); // unresolved class in error (resolution failed) - call into runtime // so that the same error from first resolution attempt is thrown. - #ifdef AARCH64 - __ mov(Rtemp, JVM_CONSTANT_UnresolvedClassInError); // this constant does not fit into 5-bit immediate constraint - __ cond_cmp(RtagType, Rtemp, ne); - #else __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne); - #endif // AARCH64 // resolved class - need to call vm to get java mirror of the class __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne); __ b(fastCase, ne); --- 385,403 ----
*** 554,569 **** __ bind(Long); #endif __ cmp(Rtemp, JVM_CONSTANT_Long); __ b(Condy, ne); - #ifdef AARCH64 - __ ldr(R0_tos, Address(Rbase, base_offset)); - #else __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize)); __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize)); - #endif // AARCH64 __ push(ltos); __ b(exit); __ bind(Condy); condy_helper(exit); --- 502,513 ----
*** 585,600 **** // VMr = obj = base address to find primitive value to push // VMr2 = flags = (tos, off) using format of CPCE::_flags __ mov(off, flags); - #ifdef AARCH64 - __ andr(off, off, (unsigned)ConstantPoolCacheEntry::field_index_mask); - #else __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits); __ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits); - #endif const Address field(obj, off); __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift); // Make sure we don't need to mask flags after the above shift --- 529,540 ----
*** 650,666 **** Label notLongDouble; __ cmp(flags, ltos); __ cond_cmp(flags, dtos, ne); __ b(notLongDouble, ne); - #ifdef AARCH64 - __ ldr(R0_tos, field); - #else __ add(rtmp, obj, wordSize); __ ldr(R0_tos_lo, Address(obj, off)); __ ldr(R1_tos_hi, Address(rtmp, off)); - #endif __ push(ltos); __ b(Done); __ bind(notLongDouble); --- 590,602 ----
*** 905,920 **** const Register Rarray = R1_tmp; const Register Rindex = R0_tos; index_check(Rarray, Rindex); - #ifdef AARCH64 - __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp)); - #else Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp); __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg); - #endif // AARCH64 } void TemplateTable::faload() { transition(itos, ftos); --- 841,852 ----
*** 1010,1025 ****
  }
  
  
  void TemplateTable::lload(int n) {
    transition(vtos, ltos);
- #ifdef AARCH64
-   __ ldr(R0_tos, laddress(n));
- #else
    __ ldr(R0_tos_lo, laddress(n));
    __ ldr(R1_tos_hi, haddress(n));
- #endif // AARCH64
  }
  
  
  void TemplateTable::fload(int n) {
    transition(vtos, ftos);
--- 942,953 ----
*** 1103,1120 **** // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload0 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition"); assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition"); __ cmp(next_bytecode, Bytecodes::_fast_fgetfield); - #ifdef AARCH64 - __ mov(Rtemp, Bytecodes::_fast_faccess_0); - __ mov(target_bytecode, Bytecodes::_fast_aload_0); - __ mov(target_bytecode, Rtemp, eq); - #else __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq); __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne); - #endif // AARCH64 // rewrite __ bind(rewrite); patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false); --- 1031,1042 ----
*** 1196,1210 ****
  void TemplateTable::wide_lstore() {
    transition(vtos, vtos);
    const Register Rlocal_index = R2_tmp;
    const Register Rlocal_base = R3_tmp;
  
- #ifdef AARCH64
-   __ pop_l(R0_tos);
- #else
    __ pop_l(R0_tos_lo, R1_tos_hi);
- #endif // AARCH64
  
    locals_index_wide(Rlocal_index);
    store_category2_local(Rlocal_index, R3_tmp);
  }
--- 1118,1128 ----
*** 1250,1265 **** // R0_tos_lo:R1_tos_hi: value __ pop_i(Rindex); index_check(Rarray, Rindex); - #ifdef AARCH64 - __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp)); - #else Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp); __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg, false); - #endif // AARCH64 } void TemplateTable::fastore() { transition(ftos, vtos); --- 1168,1179 ----
*** 1399,1414 ****
  }
  
  
  void TemplateTable::lstore(int n) {
    transition(ltos, vtos);
- #ifdef AARCH64
-   __ str(R0_tos, laddress(n));
- #else
    __ str(R0_tos_lo, laddress(n));
    __ str(R1_tos_hi, haddress(n));
- #endif // AARCH64
  }
  
  
  void TemplateTable::fstore(int n) {
    transition(ftos, vtos);
--- 1313,1324 ----
*** 1561,1600 **** case sub : __ sub_32 (R0_tos, arg1, arg2); break; case mul : __ mul_32 (R0_tos, arg1, arg2); break; case _and : __ and_32 (R0_tos, arg1, arg2); break; case _or : __ orr_32 (R0_tos, arg1, arg2); break; case _xor : __ eor_32 (R0_tos, arg1, arg2); break; - #ifdef AARCH64 - case shl : __ lslv_w (R0_tos, arg1, arg2); break; - case shr : __ asrv_w (R0_tos, arg1, arg2); break; - case ushr : __ lsrv_w (R0_tos, arg1, arg2); break; - #else case shl : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break; case shr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break; case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break; - #endif // AARCH64 default : ShouldNotReachHere(); } } void TemplateTable::lop2(Operation op) { transition(ltos, ltos); - #ifdef AARCH64 - const Register arg1 = R1_tmp; - const Register arg2 = R0_tos; - - __ pop_l(arg1); - switch (op) { - case add : __ add (R0_tos, arg1, arg2); break; - case sub : __ sub (R0_tos, arg1, arg2); break; - case _and : __ andr(R0_tos, arg1, arg2); break; - case _or : __ orr (R0_tos, arg1, arg2); break; - case _xor : __ eor (R0_tos, arg1, arg2); break; - default : ShouldNotReachHere(); - } - #else const Register arg1_lo = R2_tmp; const Register arg1_hi = R3_tmp; const Register arg2_lo = R0_tos_lo; const Register arg2_hi = R1_tos_hi; --- 1471,1490 ----
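
Note on the retained int-shift sequences above: masking the count with 0x1f before shifting matches the JVM rule that ishl/ishr/iushr use only the low five bits of the shift count. A minimal sketch in plain C++ (illustrative only, not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    static int32_t jvm_ishl(int32_t x, int32_t count) {
      return x << (count & 0x1f);            // only the low 5 bits of count matter
    }

    static int32_t jvm_iushr(int32_t x, int32_t count) {
      return static_cast<int32_t>(static_cast<uint32_t>(x) >> (count & 0x1f));
    }

    int main() {
      // A count of 33 behaves like a count of 1 for 32-bit operands.
      std::printf("%d\n", jvm_ishl(1, 33));    // prints 2
      std::printf("%d\n", jvm_iushr(-2, 33));  // prints 2147483647
      return 0;
    }
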
*** 1605,1692 **** case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break; case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break; case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break; default : ShouldNotReachHere(); } - #endif // AARCH64 } void TemplateTable::idiv() { transition(itos, itos); - #ifdef AARCH64 - const Register divisor = R0_tos; - const Register dividend = R1_tmp; - - __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry); - __ pop_i(dividend); - __ sdiv_w(R0_tos, dividend, divisor); - #else __ mov(R2, R0_tos); __ pop_i(R0); // R0 - dividend // R2 - divisor __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none); // R1 - result __ mov(R0_tos, R1); - #endif // AARCH64 } void TemplateTable::irem() { transition(itos, itos); - #ifdef AARCH64 - const Register divisor = R0_tos; - const Register dividend = R1_tmp; - const Register quotient = R2_tmp; - - __ cbz_w(divisor, Interpreter::_throw_ArithmeticException_entry); - __ pop_i(dividend); - __ sdiv_w(quotient, dividend, divisor); - __ msub_w(R0_tos, divisor, quotient, dividend); - #else __ mov(R2, R0_tos); __ pop_i(R0); // R0 - dividend // R2 - divisor __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none); // R0 - remainder - #endif // AARCH64 } void TemplateTable::lmul() { transition(ltos, ltos); - #ifdef AARCH64 - const Register arg1 = R0_tos; - const Register arg2 = R1_tmp; - - __ pop_l(arg2); - __ mul(R0_tos, arg1, arg2); - #else const Register arg1_lo = R0_tos_lo; const Register arg1_hi = R1_tos_hi; const Register arg2_lo = R2_tmp; const Register arg2_hi = R3_tmp; __ pop_l(arg2_lo, arg2_hi); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi); - #endif // AARCH64 } void TemplateTable::ldiv() { transition(ltos, ltos); - #ifdef AARCH64 - const Register divisor = R0_tos; - const Register dividend = R1_tmp; - - __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry); - __ pop_l(dividend); - __ sdiv(R0_tos, dividend, divisor); - #else const Register x_lo = R2_tmp; const Register x_hi = R3_tmp; const Register y_lo = R0_tos_lo; const Register y_hi = R1_tos_hi; --- 1495,1545 ----
*** 1694,1719 **** // check if y = 0 __ orrs(Rtemp, y_lo, y_hi); __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi); - #endif // AARCH64 } void TemplateTable::lrem() { transition(ltos, ltos); - #ifdef AARCH64 - const Register divisor = R0_tos; - const Register dividend = R1_tmp; - const Register quotient = R2_tmp; - - __ cbz(divisor, Interpreter::_throw_ArithmeticException_entry); - __ pop_l(dividend); - __ sdiv(quotient, dividend, divisor); - __ msub(R0_tos, divisor, quotient, dividend); - #else const Register x_lo = R2_tmp; const Register x_hi = R3_tmp; const Register y_lo = R0_tos_lo; const Register y_hi = R1_tos_hi; --- 1547,1561 ----
*** 1721,1788 **** // check if y = 0 __ orrs(Rtemp, y_lo, y_hi); __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq); __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi); - #endif // AARCH64 } void TemplateTable::lshl() { transition(itos, ltos); - #ifdef AARCH64 - const Register val = R1_tmp; - const Register shift_cnt = R0_tos; - __ pop_l(val); - __ lslv(R0_tos, val, shift_cnt); - #else const Register shift_cnt = R4_tmp; const Register val_lo = R2_tmp; const Register val_hi = R3_tmp; __ pop_l(val_lo, val_hi); __ andr(shift_cnt, R0_tos, 63); __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt); - #endif // AARCH64 } void TemplateTable::lshr() { transition(itos, ltos); - #ifdef AARCH64 - const Register val = R1_tmp; - const Register shift_cnt = R0_tos; - __ pop_l(val); - __ asrv(R0_tos, val, shift_cnt); - #else const Register shift_cnt = R4_tmp; const Register val_lo = R2_tmp; const Register val_hi = R3_tmp; __ pop_l(val_lo, val_hi); __ andr(shift_cnt, R0_tos, 63); __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt); - #endif // AARCH64 } void TemplateTable::lushr() { transition(itos, ltos); - #ifdef AARCH64 - const Register val = R1_tmp; - const Register shift_cnt = R0_tos; - __ pop_l(val); - __ lsrv(R0_tos, val, shift_cnt); - #else const Register shift_cnt = R4_tmp; const Register val_lo = R2_tmp; const Register val_hi = R3_tmp; __ pop_l(val_lo, val_hi); __ andr(shift_cnt, R0_tos, 63); __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt); - #endif // AARCH64 } void TemplateTable::fop2(Operation op) { transition(ftos, ftos); --- 1563,1608 ----
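
Similarly, the retained lshl/lshr/lushr sequences mask the count with 63, since long shifts use only the low six bits of the count. A minimal sketch in plain C++ (illustrative only; the arithmetic right shift of a negative value assumes the usual two's-complement targets):

    #include <cstdint>
    #include <cstdio>

    static int64_t jvm_lshl(int64_t x, int32_t count)  { return x << (count & 63); }
    static int64_t jvm_lshr(int64_t x, int32_t count)  { return x >> (count & 63); }  // arithmetic
    static int64_t jvm_lushr(int64_t x, int32_t count) {
      return static_cast<int64_t>(static_cast<uint64_t>(x) >> (count & 63));          // logical
    }

    int main() {
      std::printf("%lld\n", (long long)jvm_lshl(1, 65));    // behaves like a shift by 1: prints 2
      std::printf("%lld\n", (long long)jvm_lshr(-8, 2));    // arithmetic: prints -2
      std::printf("%lld\n", (long long)jvm_lushr(-1, 60));  // logical: prints 15
      return 0;
    }
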
*** 1874,1889 ****
  }
  
  
  void TemplateTable::lneg() {
    transition(ltos, ltos);
- #ifdef AARCH64
-   __ neg(R0_tos, R0_tos);
- #else
    __ rsbs(R0_tos_lo, R0_tos_lo, 0);
    __ rsc (R1_tos_hi, R1_tos_hi, 0);
- #endif // AARCH64
  }
  
  
  void TemplateTable::fneg() {
    transition(ftos, ftos);
--- 1694,1705 ----
*** 1989,2029 **** #endif // ASSERT // Conversion switch (bytecode()) { case Bytecodes::_i2l: - #ifdef AARCH64 - __ sign_extend(R0_tos, R0_tos, 32); - #else __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1)); - #endif // AARCH64 break; case Bytecodes::_i2f: - #ifdef AARCH64 - __ scvtf_sw(S0_tos, R0_tos); - #else #ifdef __SOFTFP__ __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos); #else __ fmsr(S0_tmp, R0_tos); __ fsitos(S0_tos, S0_tmp); #endif // __SOFTFP__ - #endif // AARCH64 break; case Bytecodes::_i2d: - #ifdef AARCH64 - __ scvtf_dw(D0_tos, R0_tos); - #else #ifdef __SOFTFP__ __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos); #else __ fmsr(S0_tmp, R0_tos); __ fsitod(D0_tos, S0_tmp); #endif // __SOFTFP__ - #endif // AARCH64 break; case Bytecodes::_i2b: __ sign_extend(R0_tos, R0_tos, 8); break; --- 1805,1833 ----
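
Note on the retained i2l path above: the high word is produced as an arithmetic right shift of the low word by 31 bits, i.e. a sign extension. A minimal sketch in plain C++ (illustrative only; assumes the usual two's-complement arithmetic shift):

    #include <cstdint>
    #include <cassert>

    int main() {
      int32_t lo = -5;
      int32_t hi = lo >> 31;  // all ones for negative values, all zeros otherwise

      int64_t widened = (static_cast<int64_t>(static_cast<uint32_t>(hi)) << 32)
                      | static_cast<uint32_t>(lo);
      assert(widened == static_cast<int64_t>(lo));  // same result as a plain widening conversion
      return 0;
    }
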
*** 2039,2091 **** case Bytecodes::_l2i: /* nothing to do */ break; case Bytecodes::_l2f: - #ifdef AARCH64 - __ scvtf_sx(S0_tos, R0_tos); - #else __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi); #if !defined(__SOFTFP__) && !defined(__ABI_HARD__) __ fmsr(S0_tos, R0); #endif // !__SOFTFP__ && !__ABI_HARD__ - #endif // AARCH64 break; case Bytecodes::_l2d: - #ifdef AARCH64 - __ scvtf_dx(D0_tos, R0_tos); - #else __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi); #if !defined(__SOFTFP__) && !defined(__ABI_HARD__) __ fmdrr(D0_tos, R0, R1); #endif // !__SOFTFP__ && !__ABI_HARD__ - #endif // AARCH64 break; case Bytecodes::_f2i: - #ifdef AARCH64 - __ fcvtzs_ws(R0_tos, S0_tos); - #else #ifndef __SOFTFP__ __ ftosizs(S0_tos, S0_tos); __ fmrs(R0_tos, S0_tos); #else __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos); #endif // !__SOFTFP__ - #endif // AARCH64 break; case Bytecodes::_f2l: - #ifdef AARCH64 - __ fcvtzs_xs(R0_tos, S0_tos); - #else #ifndef __SOFTFP__ __ fmrs(R0_tos, S0_tos); #endif // !__SOFTFP__ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos); - #endif // AARCH64 break; case Bytecodes::_f2d: #ifdef __SOFTFP__ __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos); --- 1843,1879 ----
*** 2093,2123 **** __ convert_f2d(D0_tos, S0_tos); #endif // __SOFTFP__ break; case Bytecodes::_d2i: - #ifdef AARCH64 - __ fcvtzs_wd(R0_tos, D0_tos); - #else #ifndef __SOFTFP__ __ ftosizd(Stemp, D0); __ fmrs(R0, Stemp); #else __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi); #endif // !__SOFTFP__ - #endif // AARCH64 break; case Bytecodes::_d2l: - #ifdef AARCH64 - __ fcvtzs_xd(R0_tos, D0_tos); - #else #ifndef __SOFTFP__ __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos); #endif // !__SOFTFP__ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi); - #endif // AARCH64 break; case Bytecodes::_d2f: #ifdef __SOFTFP__ __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi); --- 1881,1903 ----
*** 2132,2151 **** } void TemplateTable::lcmp() { transition(ltos, itos); - #ifdef AARCH64 - const Register arg1 = R1_tmp; - const Register arg2 = R0_tos; - - __ pop_l(arg1); - - __ cmp(arg1, arg2); - __ cset(R0_tos, gt); // 1 if '>', else 0 - __ csinv(R0_tos, R0_tos, ZR, ge); // previous value if '>=', else -1 - #else const Register arg1_lo = R2_tmp; const Register arg1_hi = R3_tmp; const Register arg2_lo = R0_tos_lo; const Register arg2_hi = R1_tos_hi; const Register res = R4_tmp; --- 1912,1921 ----
*** 2164,2200 **** __ cmp (arg1_lo, arg2_lo); __ mvn (res, 0, lo); __ mov (res, 1, hi); __ bind(done); __ mov (R0_tos, res); - #endif // AARCH64 } void TemplateTable::float_cmp(bool is_float, int unordered_result) { assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result"); - #ifdef AARCH64 - if (is_float) { - transition(ftos, itos); - __ pop_f(S1_tmp); - __ fcmp_s(S1_tmp, S0_tos); - } else { - transition(dtos, itos); - __ pop_d(D1_tmp); - __ fcmp_d(D1_tmp, D0_tos); - } - - if (unordered_result < 0) { - __ cset(R0_tos, gt); // 1 if '>', else 0 - __ csinv(R0_tos, R0_tos, ZR, ge); // previous value if '>=', else -1 - } else { - __ cset(R0_tos, hi); // 1 if '>' or unordered, else 0 - __ csinv(R0_tos, R0_tos, ZR, pl); // previous value if '>=' or unordered, else -1 - } - - #else #ifdef __SOFTFP__ if (is_float) { transition(ftos, itos); --- 1934,1949 ----
*** 2256,2266 **** __ mov(R0_tos, 1); // result == 1 if greater or unordered __ mvn(R0_tos, 0, mi); // result == -1 if less (N=1) } __ mov(R0_tos, 0, eq); // result == 0 if equ (Z=1) #endif // __SOFTFP__ - #endif // AARCH64 } void TemplateTable::branch(bool is_jsr, bool is_wide) { --- 2005,2014 ----
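
For reference, the result contracts that the lcmp and float_cmp sequences above implement: a signed three-way compare for longs, and for floats/doubles a three-way compare where NaN yields -1 (fcmpl/dcmpl) or +1 (fcmpg/dcmpg). A minimal sketch in plain C++ (illustrative only, not HotSpot code):

    #include <cmath>
    #include <cassert>

    static int jvm_lcmp(long long a, long long b) {
      return (a < b) ? -1 : (a > b) ? 1 : 0;
    }

    // unordered_result is -1 for fcmpl and +1 for fcmpg.
    static int jvm_fcmp(float a, float b, int unordered_result) {
      if (std::isnan(a) || std::isnan(b)) return unordered_result;
      return (a < b) ? -1 : (a > b) ? 1 : 0;
    }

    int main() {
      assert(jvm_lcmp(-1, 1) == -1);
      assert(jvm_fcmp(1.0f, 2.0f, -1) == -1);
      assert(jvm_fcmp(NAN, 0.0f, -1) == -1);  // fcmpl: NaN compares as "less"
      assert(jvm_fcmp(NAN, 0.0f, +1) == 1);   // fcmpg: NaN compares as "greater"
      return 0;
    }
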
*** 2301,2316 **** __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset())); __ sub(Rret_addr, Rret_addr, Rtemp); // Load the next target bytecode into R3_bytecode and advance Rbcp - #ifdef AARCH64 - __ add(Rbcp, Rbcp, Rdisp); - __ ldrb(R3_bytecode, Address(Rbcp)); - #else __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed)); - #endif // AARCH64 // Push return address __ push_i(Rret_addr); // jsr returns vtos __ dispatch_only_noverify(vtos); --- 2049,2059 ----
*** 2318,2333 **** } // Normal (non-jsr) branch handling // Adjust the bcp by the displacement in Rdisp and load next bytecode. - #ifdef AARCH64 - __ add(Rbcp, Rbcp, Rdisp); - __ ldrb(R3_bytecode, Address(Rbcp)); - #else __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed)); - #endif // AARCH64 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters"); Label backedge_counter_overflow; Label profile_method; Label dispatch; --- 2061,2071 ----
*** 2338,2353 **** const Register Rcnt = R2_tmp; const Register Rcounters = R1_tmp; // count only if backward branch - #ifdef AARCH64 - __ tbz(Rdisp, (BitsPerWord - 1), dispatch); // TODO-AARCH64: check performance of this variant on 32-bit ARM - #else __ tst(Rdisp, Rdisp); __ b(dispatch, pl); - #endif // AARCH64 if (TieredCompilation) { Label no_mdo; int increment = InvocationCounter::count_increment; if (ProfileInterpreter) { --- 2076,2087 ----
*** 2362,2393 **** Rcnt, R4_tmp, eq, &backedge_counter_overflow); __ b(dispatch); } __ bind(no_mdo); // Increment backedge counter in MethodCounters* ! // Note Rbumped_taken_count is a callee saved registers for ARM32, but caller saved for ARM64 __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/, Rdisp, R3_bytecode, ! AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg)); const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset())); __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask, Rcnt, R4_tmp, eq, &backedge_counter_overflow); } else { // Increment backedge counter in MethodCounters* __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/, Rdisp, R3_bytecode, ! AARCH64_ONLY(Rbumped_taken_count) NOT_AARCH64(noreg)); __ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter __ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter __ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter __ ldr_u32(Rcnt, Address(Rcounters, inv_offset)); // load invocation counter - #ifdef AARCH64 - __ andr(Rcnt, Rcnt, (unsigned int)InvocationCounter::count_mask_value); // and the status bits - #else __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value); // and the status bits - #endif // AARCH64 __ add(Rcnt, Rcnt, Rtemp); // add both counters if (ProfileInterpreter) { // Test to see if we should create a method data oop const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset())); --- 2096,2123 ---- Rcnt, R4_tmp, eq, &backedge_counter_overflow); __ b(dispatch); } __ bind(no_mdo); // Increment backedge counter in MethodCounters* ! // Note Rbumped_taken_count is a callee saved registers for ARM32 __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/, Rdisp, R3_bytecode, ! noreg); const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset())); __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask, Rcnt, R4_tmp, eq, &backedge_counter_overflow); } else { // Increment backedge counter in MethodCounters* __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/, Rdisp, R3_bytecode, ! noreg); __ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter __ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter __ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter __ ldr_u32(Rcnt, Address(Rcounters, inv_offset)); // load invocation counter __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value); // and the status bits __ add(Rcnt, Rcnt, Rtemp); // add both counters if (ProfileInterpreter) { // Test to see if we should create a method data oop const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
*** 2410,2426 **** // frequency_counter_overflow(). To avoid excessive calls to the overflow // routine while the method is being compiled, add a second test to make // sure the overflow function is called only once every overflow_frequency. const int overflow_frequency = 1024; - #ifdef AARCH64 - __ tst(Rbumped_taken_count, (unsigned)(overflow_frequency-1)); - #else // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0 assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency"); __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22)); - #endif // AARCH64 __ b(backedge_counter_overflow, eq); } } else { if (UseOnStackReplacement) { --- 2140,2152 ----
*** 2485,2501 **** // R0 is OSR buffer __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset())); __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize)); - #ifdef AARCH64 - __ ldp(FP, LR, Address(FP)); - __ mov(SP, Rtemp); - #else __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR)); __ bic(SP, Rtemp, StackAlignmentInBytes - 1); // Remove frame and align stack - #endif // AARCH64 __ jump(R1_tmp); } } } --- 2211,2222 ----
*** 2503,2525 **** void TemplateTable::if_0cmp(Condition cc) { transition(itos, vtos); // assume branch is more often taken than not (loops use backward branches) Label not_taken; - #ifdef AARCH64 - if (cc == equal) { - __ cbnz_w(R0_tos, not_taken); - } else if (cc == not_equal) { - __ cbz_w(R0_tos, not_taken); - } else { __ cmp_32(R0_tos, 0); __ b(not_taken, convNegCond(cc)); - } - #else - __ cmp_32(R0_tos, 0); - __ b(not_taken, convNegCond(cc)); - #endif // AARCH64 branch(false, false); __ bind(not_taken); __ profile_not_taken_branch(R0_tmp); } --- 2224,2235 ----
*** 2601,2655 **** void TemplateTable::tableswitch() { transition(itos, vtos); const Register Rindex = R0_tos; - #ifndef AARCH64 const Register Rtemp2 = R1_tmp; - #endif // !AARCH64 const Register Rabcp = R2_tmp; // aligned bcp const Register Rlow = R3_tmp; const Register Rhigh = R4_tmp; const Register Roffset = R5_tmp; // align bcp __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1)); __ align_reg(Rabcp, Rtemp, BytesPerInt); // load lo & hi - #ifdef AARCH64 - __ ldp_w(Rlow, Rhigh, Address(Rabcp, 2*BytesPerInt, post_indexed)); - #else __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback); - #endif // AARCH64 __ byteswap_u32(Rlow, Rtemp, Rtemp2); __ byteswap_u32(Rhigh, Rtemp, Rtemp2); // compare index with high bound __ cmp_32(Rhigh, Rindex); - #ifdef AARCH64 - Label default_case, do_dispatch; - __ ccmp_w(Rindex, Rlow, Assembler::flags_for_condition(lt), ge); - __ b(default_case, lt); - - __ sub_w(Rindex, Rindex, Rlow); - __ ldr_s32(Roffset, Address(Rabcp, Rindex, ex_sxtw, LogBytesPerInt)); - if(ProfileInterpreter) { - __ sxtw(Rindex, Rindex); - __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp); - } - __ b(do_dispatch); - - __ bind(default_case); - __ ldr_s32(Roffset, Address(Rabcp, -3 * BytesPerInt)); - if(ProfileInterpreter) { - __ profile_switch_default(R0_tmp); - } - - __ bind(do_dispatch); - #else // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow) __ subs(Rindex, Rindex, Rlow, ge); // if Rindex <= Rhigh and (Rindex - Rlow) >= 0 --- 2311,2338 ----
*** 2671,2691 **** __ bind(continue_execution); } else { __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt); __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge); } - #endif // AARCH64 __ byteswap_u32(Roffset, Rtemp, Rtemp2); // load the next bytecode to R3_bytecode and advance Rbcp - #ifdef AARCH64 - __ add(Rbcp, Rbcp, Roffset, ex_sxtw); - __ ldrb(R3_bytecode, Address(Rbcp)); - #else __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed)); - #endif // AARCH64 __ dispatch_only(vtos); } --- 2354,2368 ----
*** 2711,2746 **** // align bcp __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1)); __ align_reg(Rabcp, Rtemp, BytesPerInt); // load default & counter - #ifdef AARCH64 - __ ldp_w(Rdefault, Rcount, Address(Rabcp, 2*BytesPerInt, post_indexed)); - #else __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback); - #endif // AARCH64 __ byteswap_u32(Rcount, R1_tmp, Rtemp); - #ifdef AARCH64 - __ cbz_w(Rcount, default_case); - #else __ cmp_32(Rcount, 0); __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne); __ b(default_case, eq); - #endif // AARCH64 // table search __ bind(loop); - #ifdef AARCH64 - __ ldr_s32(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed)); - #endif // AARCH64 __ cmp_32(Rtemp, Rkey); __ b(found, eq); __ subs(Rcount, Rcount, 1); - #ifndef AARCH64 __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne); - #endif // !AARCH64 __ b(loop, ne); // default case __ bind(default_case); __ profile_switch_default(R0_tmp); --- 2388,2410 ----
*** 2771,2786 **** // continue execution __ bind(continue_execution); __ byteswap_u32(Roffset, R1_tmp, Rtemp); // load the next bytecode to R3_bytecode and advance Rbcp - #ifdef AARCH64 - __ add(Rbcp, Rbcp, Roffset, ex_sxtw); - __ ldrb(R3_bytecode, Address(Rbcp)); - #else __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed)); - #endif // AARCH64 __ dispatch_only(vtos); } void TemplateTable::fast_binaryswitch() { --- 2435,2445 ----
*** 2844,2859 **** // if (key < array[h].fast_match()) { // j = h; // } else { // i = h; // } - #ifdef AARCH64 - __ add(temp1, array, AsmOperand(h, lsl, 1+LogBytesPerInt)); - __ ldr_s32(val, Address(temp1)); - #else __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt)); - #endif // AARCH64 // Convert array[h].match to native byte-ordering before compare __ byteswap_u32(val, temp1, temp2); __ cmp_32(key, val); __ mov(j, h, lt); // j = h if (key < array[h].fast_match()) __ mov(i, h, ge); // i = h if (key >= array[h].fast_match()) --- 2503,2513 ----
*** 2865,2908 **** } // end of binary search, result index is i (must check again!) Label default_case; // Convert array[i].match to native byte-ordering before compare - #ifdef AARCH64 - __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt)); - __ ldr_s32(val, Address(temp1)); - #else __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt)); - #endif // AARCH64 __ byteswap_u32(val, temp1, temp2); __ cmp_32(key, val); __ b(default_case, ne); // entry found __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt)); __ ldr_s32(offset, Address(temp1, 1*BytesPerInt)); __ profile_switch_case(R0, i, R1, i); __ byteswap_u32(offset, temp1, temp2); - #ifdef AARCH64 - __ add(Rbcp, Rbcp, offset, ex_sxtw); - __ ldrb(R3_bytecode, Address(Rbcp)); - #else __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed)); - #endif // AARCH64 __ dispatch_only(vtos); // default case __ bind(default_case); __ profile_switch_default(R0); __ ldr_s32(offset, Address(array, -2*BytesPerInt)); __ byteswap_u32(offset, temp1, temp2); - #ifdef AARCH64 - __ add(Rbcp, Rbcp, offset, ex_sxtw); - __ ldrb(R3_bytecode, Address(Rbcp)); - #else __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed)); - #endif // AARCH64 __ dispatch_only(vtos); } void TemplateTable::_return(TosState state) { --- 2519,2547 ----
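
For readers following fast_binaryswitch above: it is a binary search over the sorted (match, offset) pairs of the lookupswitch, exactly as the comments embedded in the generated code describe. A minimal sketch in plain C++ (illustrative only; names are made up):

    #include <cstdint>
    #include <cassert>

    struct LookupPair { int32_t match; int32_t offset; };

    // Returns the offset for key, or default_offset if key is absent.
    static int32_t binary_switch(const LookupPair* array, int n, int32_t key, int32_t default_offset) {
      int i = 0, j = n;
      while (i + 1 < j) {
        int h = (i + j) >> 1;          // i < h < j
        if (key < array[h].match) j = h; else i = h;
      }
      return (array[i].match == key) ? array[i].offset : default_offset;
    }

    int main() {
      const LookupPair pairs[] = { {1, 100}, {5, 200}, {9, 300} };
      assert(binary_switch(pairs, 3, 5, -1) == 200);
      assert(binary_switch(pairs, 3, 6, -1) == -1);
      return 0;
    }
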
*** 2930,2946 ****
  }
  
    __ remove_activation(state, LR);
  
    __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
  
- #ifndef AARCH64
    // According to interpreter calling conventions, result is returned in R0/R1,
    // so ftos (S0) and dtos (D0) are moved to R0/R1.
    // This conversion should be done after remove_activation, as it uses
    // push(state) & pop(state) to preserve return value.
    __ convert_tos_to_retval(state);
- #endif // !AARCH64
  
    __ ret();
    __ nop(); // to avoid filling CPU pipeline with invalid instructions
    __ nop();
--- 2569,2583 ----
*** 2970,2992 **** // previous memory refs). Requirements (2) and (3) require some barriers // before volatile stores and after volatile loads. These nearly cover // requirement (1) but miss the volatile-store-volatile-load case. This final // case is placed after volatile-stores although it could just as well go // before volatile-loads. - // TODO-AARCH64: consider removing extra unused parameters void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint, Register tmp, bool preserve_flags, Register load_tgt) { - #ifdef AARCH64 - __ membar(order_constraint); - #else __ membar(order_constraint, tmp, preserve_flags, load_tgt); - #endif } ! // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR. void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rindex, size_t index_size) { assert_different_registers(Rcache, Rindex, Rtemp); --- 2607,2624 ---- // previous memory refs). Requirements (2) and (3) require some barriers // before volatile stores and after volatile loads. These nearly cover // requirement (1) but miss the volatile-store-volatile-load case. This final // case is placed after volatile-stores although it could just as well go // before volatile-loads. void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint, Register tmp, bool preserve_flags, Register load_tgt) { __ membar(order_constraint, tmp, preserve_flags, load_tgt); } ! // Blows all volatile registers: R0-R3, Rtemp, LR. void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rindex, size_t index_size) { assert_different_registers(Rcache, Rindex, Rtemp);
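
The comment above describes the standard placement of barriers around volatile accesses: a StoreStore|LoadStore barrier before a volatile store, a LoadLoad|LoadStore barrier after a volatile load, and a full barrier after volatile stores to cover the store-then-load case. A minimal sketch of that placement in plain C++11 atomics (illustrative only, not HotSpot code; release/acquire fences stand in for the membar bit sets):

    #include <atomic>
    #include <cstdint>

    std::atomic<int32_t> guard{0};
    int32_t payload = 0;

    void volatile_style_store(int32_t v) {
      payload = v;                                          // earlier accesses...
      std::atomic_thread_fence(std::memory_order_release);  // ...may not pass the fence (StoreStore|LoadStore)
      guard.store(1, std::memory_order_relaxed);            // the "volatile" store itself
    }

    int32_t volatile_style_load() {
      int32_t g = guard.load(std::memory_order_relaxed);    // the "volatile" load itself
      std::atomic_thread_fence(std::memory_order_acquire);  // later accesses may not float above (LoadLoad|LoadStore)
      return g ? payload : -1;
    }

    int main() {
      volatile_style_store(42);
      return volatile_style_load() == 42 ? 0 : 1;
    }
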
*** 3044,3054 ****
      __ resolve_oop_handle(Robj);
    }
  }
  
! // Blows all volatile registers: R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR.
  void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                                 Register method,
                                                 Register itable_index,
                                                 Register flags,
                                                 bool is_invokevirtual,
--- 2676,2686 ----
      __ resolve_oop_handle(Robj);
    }
  }
  
! // Blows all volatile registers: R0-R3, Rtemp, LR.
  void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                                 Register method,
                                                 Register itable_index,
                                                 Register flags,
                                                 bool is_invokevirtual,
*** 3087,3097 ****
    __ ldr_u32(flags, Address(temp_reg, flags_offset));
  }
  
  
  // The registers cache and index expected to be set before call, and should not be Rtemp.
! // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
  // except cache and index registers which are preserved.
  void TemplateTable::jvmti_post_field_access(Register Rcache,
                                              Register Rindex,
                                              bool is_static,
                                              bool has_tos) {
--- 2719,2729 ----
    __ ldr_u32(flags, Address(temp_reg, flags_offset));
  }
  
  
  // The registers cache and index expected to be set before call, and should not be Rtemp.
! // Blows volatile registers R0-R3, Rtemp, LR,
  // except cache and index registers which are preserved.
  void TemplateTable::jvmti_post_field_access(Register Rcache,
                                              Register Rindex,
                                              bool is_static,
                                              bool has_tos) {
*** 3165,3214 **** // Make sure we don't need to mask flags after the above shift ConstantPoolCacheEntry::verify_tos_state_shift(); // There are actually two versions of implementation of getfield/getstatic: // - // 32-bit ARM: // 1) Table switch using add(PC,...) instruction (fast_version) // 2) Table switch using ldr(PC,...) instruction // - // AArch64: - // 1) Table switch using adr/add/br instructions (fast_version) - // 2) Table switch using adr/ldr/br instructions - // // First version requires fixed size of code block for each case and // can not be used in RewriteBytecodes and VerifyOops // modes. // Size of fixed size code block for fast_version ! const int log_max_block_size = AARCH64_ONLY(2) NOT_AARCH64(3); const int max_block_size = 1 << log_max_block_size; // Decide if fast version is enabled ! bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !VerifyInterpreterStackTop; // On 32-bit ARM atos and itos cases can be merged only for fast version, because // atos requires additional processing in slow version. ! // On AArch64 atos and itos cannot be merged. ! bool atos_merged_with_itos = AARCH64_ONLY(false) NOT_AARCH64(fast_version); assert(number_of_states == 10, "number of tos states should be equal to 9"); __ cmp(Rflags, itos); - #ifdef AARCH64 - __ b(Lint, eq); - - if(fast_version) { - __ adr(Rtemp, Lbtos); - __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize)); - __ br(Rtemp); - } else { - __ adr(Rtemp, Ltable); - __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags)); - __ br(Rtemp); - } - #else if(atos_merged_with_itos) { __ cmp(Rflags, atos, ne); } // table switch by type --- 2797,2827 ---- // Make sure we don't need to mask flags after the above shift ConstantPoolCacheEntry::verify_tos_state_shift(); // There are actually two versions of implementation of getfield/getstatic: // // 1) Table switch using add(PC,...) instruction (fast_version) // 2) Table switch using ldr(PC,...) instruction // // First version requires fixed size of code block for each case and // can not be used in RewriteBytecodes and VerifyOops // modes. // Size of fixed size code block for fast_version ! const int log_max_block_size = 3; const int max_block_size = 1 << log_max_block_size; // Decide if fast version is enabled ! bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops; // On 32-bit ARM atos and itos cases can be merged only for fast version, because // atos requires additional processing in slow version. ! bool atos_merged_with_itos = fast_version; assert(number_of_states == 10, "number of tos states should be equal to 9"); __ cmp(Rflags, itos); if(atos_merged_with_itos) { __ cmp(Rflags, atos, ne); } // table switch by type
*** 3218,3234 **** __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne); } // jump to itos/atos case __ b(Lint); - #endif // AARCH64 // table with addresses for slow version if (fast_version) { // nothing to do } else { - AARCH64_ONLY(__ align(wordSize)); __ bind(Ltable); __ emit_address(Lbtos); __ emit_address(Lztos); __ emit_address(Lctos); __ emit_address(Lstos); --- 2831,2845 ----
*** 3307,3321 **** // ltos { assert(ltos == seq++, "ltos has unexpected value"); FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version); __ bind(Lltos); - #ifdef AARCH64 - __ ldr(R0_tos, Address(Robj, Roffset)); - #else __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg); - #endif // AARCH64 __ push(ltos); if (!is_static && rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp); } __ b(Done); --- 2918,2928 ----
*** 3341,3355 **** assert(dtos == seq++, "dtos has unexpected value"); FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version); __ bind(Ldtos); // doubles and longs are placed on stack in the same way, so // we can use push(ltos) to transfer value without using VFP - #ifdef AARCH64 - __ ldr(R0_tos, Address(Robj, Roffset)); - #else __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg); - #endif // AARCH64 __ push(ltos); if (!is_static && rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp); } __ b(Done); --- 2948,2958 ----
*** 3357,3367 **** // atos { assert(atos == seq++, "atos has unexpected value"); ! // atos case for AArch64 and slow version on 32-bit ARM if(!atos_merged_with_itos) { __ bind(Latos); do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ push(atos); // Rewrite bytecode to be faster --- 2960,2970 ---- // atos { assert(atos == seq++, "atos has unexpected value"); ! // atos case for slow version on 32-bit ARM if(!atos_merged_with_itos) { __ bind(Latos); do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ push(atos); // Rewrite bytecode to be faster
*** 3414,3424 ****
    getfield_or_static(byte_no, true);
  }
  
  
  // The registers cache and index expected to be set before call, and should not be R1 or Rtemp.
! // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
  // except cache and index registers which are preserved.
  void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  
    assert_different_registers(Rcache, Rindex, R1, Rtemp);
--- 3017,3027 ----
    getfield_or_static(byte_no, true);
  }
  
  
  // The registers cache and index expected to be set before call, and should not be R1 or Rtemp.
! // Blows volatile registers R0-R3, Rtemp, LR,
  // except cache and index registers which are preserved.
  void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  
    assert_different_registers(Rcache, Rindex, R1, Rtemp);
*** 3446,3467 **** // Make sure we don't need to mask Rtemp after the above shift ConstantPoolCacheEntry::verify_tos_state_shift(); __ cmp(Rtemp, ltos); __ cond_cmp(Rtemp, dtos, ne); - #ifdef AARCH64 - __ mov(Rtemp, Interpreter::expr_offset_in_bytes(2)); - __ mov(R1, Interpreter::expr_offset_in_bytes(1)); - __ mov(R1, Rtemp, eq); - __ ldr(R1, Address(Rstack_top, R1)); - #else // two word value (ltos/dtos) __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq); // one word value (not ltos, dtos) __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne); - #endif // AARCH64 } // cache entry pointer __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord)); __ add(R2, R2, in_bytes(cp_base_offset)); --- 3049,3063 ----
*** 3520,3578 **** // // 32-bit ARM: // 1) Table switch using add(PC,...) instruction (fast_version) // 2) Table switch using ldr(PC,...) instruction // - // AArch64: - // 1) Table switch using adr/add/br instructions (fast_version) - // 2) Table switch using adr/ldr/br instructions - // // First version requires fixed size of code block for each case and // can not be used in RewriteBytecodes and VerifyOops // modes. // Size of fixed size code block for fast_version (in instructions) ! const int log_max_block_size = AARCH64_ONLY(is_static ? 2 : 3) NOT_AARCH64(3); const int max_block_size = 1 << log_max_block_size; // Decide if fast version is enabled ! bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops && !ZapHighNonSignificantBits; assert(number_of_states == 10, "number of tos states should be equal to 9"); // itos case is frequent and is moved outside table switch __ cmp(Rflags, itos); - #ifdef AARCH64 - __ b(Lint, eq); - - if (fast_version) { - __ adr(Rtemp, Lbtos); - __ add(Rtemp, Rtemp, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize)); - __ br(Rtemp); - } else { - __ adr(Rtemp, Ltable); - __ ldr(Rtemp, Address::indexed_ptr(Rtemp, Rflags)); - __ br(Rtemp); - } - #else // table switch by type if (fast_version) { __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne); } else { __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne); } // jump to itos case __ b(Lint); - #endif // AARCH64 // table with addresses for slow version if (fast_version) { // nothing to do } else { - AARCH64_ONLY(__ align(wordSize)); __ bind(Ltable); __ emit_address(Lbtos); __ emit_address(Lztos); __ emit_address(Lctos); __ emit_address(Lstos); --- 3116,3155 ---- // // 32-bit ARM: // 1) Table switch using add(PC,...) instruction (fast_version) // 2) Table switch using ldr(PC,...) instruction // // First version requires fixed size of code block for each case and // can not be used in RewriteBytecodes and VerifyOops // modes. // Size of fixed size code block for fast_version (in instructions) ! const int log_max_block_size = 3; const int max_block_size = 1 << log_max_block_size; // Decide if fast version is enabled ! bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops; assert(number_of_states == 10, "number of tos states should be equal to 9"); // itos case is frequent and is moved outside table switch __ cmp(Rflags, itos); // table switch by type if (fast_version) { __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne); } else { __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne); } // jump to itos case __ b(Lint); // table with addresses for slow version if (fast_version) { // nothing to do } else { __ bind(Ltable); __ emit_address(Lbtos); __ emit_address(Lztos); __ emit_address(Lctos); __ emit_address(Lstos);
*** 3655,3669 **** assert(ltos == seq++, "ltos has unexpected value"); FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version); __ bind(Lltos); __ pop(ltos); if (!is_static) pop_and_check_object(Robj); - #ifdef AARCH64 - __ str(R0_tos, Address(Robj, Roffset)); - #else __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false); - #endif // AARCH64 if (!is_static && rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no); } __ b(Done); } --- 3232,3242 ----
*** 3691,3705 **** __ bind(Ldtos); // doubles and longs are placed on stack in the same way, so // we can use pop(ltos) to transfer value without using VFP __ pop(ltos); if (!is_static) pop_and_check_object(Robj); - #ifdef AARCH64 - __ str(R0_tos, Address(Robj, Roffset)); - #else __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false); - #endif // AARCH64 if (!is_static && rc == may_rewrite) { patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no); } __ b(Done); } --- 3264,3274 ----
*** 3780,3790 ****
  void TemplateTable::jvmti_post_fast_field_mod() {
    // This version of jvmti_post_fast_field_mod() is not used on ARM
    Unimplemented();
  }
  
! // Blows volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64), Rtemp, LR,
  // but preserves tosca with the given state.
  void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
    if (__ can_post_field_modification()) {
      // Check to see if a field modification watch has been set before we take
      // the time to call into the VM.
--- 3349,3359 ----
  void TemplateTable::jvmti_post_fast_field_mod() {
    // This version of jvmti_post_fast_field_mod() is not used on ARM
    Unimplemented();
  }
  
! // Blows volatile registers R0-R3, Rtemp, LR,
  // but preserves tosca with the given state.
  void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
    if (__ can_post_field_modification()) {
      // Check to see if a field modification watch has been set before we take
      // the time to call into the VM.
*** 3849,3859 ****
    if (gen_volatile_check) {
      // Check for volatile store
      Label notVolatile;
      __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
  
-     // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explict barrier
      volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore),
                       Rtemp);
  
      __ bind(notVolatile);
    }
--- 3418,3427 ----
*** 3876,3909 **** __ access_store_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg,false); break; case Bytecodes::_fast_iputfield: __ access_store_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false); break; - #ifdef AARCH64 - case Bytecodes::_fast_lputfield: __ str (R0_tos, addr); break; - case Bytecodes::_fast_fputfield: __ str_s(S0_tos, addr); break; - case Bytecodes::_fast_dputfield: __ str_d(D0_tos, addr); break; - #else case Bytecodes::_fast_lputfield: __ access_store_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg, false); break; case Bytecodes::_fast_fputfield: __ access_store_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg, false); break; case Bytecodes::_fast_dputfield: __ access_store_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg, false); break; - #endif // AARCH64 - case Bytecodes::_fast_aputfield: do_oop_store(_masm, addr, R0_tos, Rtemp, R1_tmp, R2_tmp, false); break; default: ShouldNotReachHere(); } if (gen_volatile_check) { Label notVolatile; Label skipMembar; __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift | 1 << ConstantPoolCacheEntry::is_final_shift); --- 3444,3471 ---- __ access_store_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg,false); break; case Bytecodes::_fast_iputfield: __ access_store_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false); break; case Bytecodes::_fast_lputfield: __ access_store_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg, false); break; case Bytecodes::_fast_fputfield: __ access_store_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg, false); break; case Bytecodes::_fast_dputfield: __ access_store_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg, false); break; case Bytecodes::_fast_aputfield: do_oop_store(_masm, addr, R0_tos, Rtemp, R1_tmp, R2_tmp, false); break; default: ShouldNotReachHere(); } + if (gen_volatile_check) { Label notVolatile; Label skipMembar; __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift | 1 << ConstantPoolCacheEntry::is_final_shift);
*** 3982,4006 **** __ access_load_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg); break; case Bytecodes::_fast_igetfield: __ access_load_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg); break; - #ifdef AARCH64 - case Bytecodes::_fast_lgetfield: __ ldr (R0_tos, addr); break; - case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, addr); break; - case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, addr); break; - #else case Bytecodes::_fast_lgetfield: __ access_load_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg); break; case Bytecodes::_fast_fgetfield: __ access_load_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg); break; case Bytecodes::_fast_dgetfield: __ access_load_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg); break; - #endif // AARCH64 case Bytecodes::_fast_agetfield: do_oop_load(_masm, R0_tos, addr); __ verify_oop(R0_tos); break; default: --- 3544,3562 ----
*** 4010,4020 ****
    if (gen_volatile_check) {
      // Check for volatile load
      Label notVolatile;
      __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
  
-     // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explict barrier
      volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore),
                       Rtemp);
  
      __ bind(notVolatile);
    }
  }
--- 3566,3575 ----
*** 4048,4116 **** // make sure exception is reported in correct bcp range (getfield is next instruction) __ add(Rbcp, Rbcp, 1); __ null_check(Robj, Rtemp); __ sub(Rbcp, Rbcp, 1); - #ifdef AARCH64 - if (gen_volatile_check) { - Label notVolatile; - __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); - - __ add(Rtemp, Robj, Roffset); - - if (state == itos) { - __ ldar_w(R0_tos, Rtemp); - } else if (state == atos) { - if (UseCompressedOops) { - __ ldar_w(R0_tos, Rtemp); - __ decode_heap_oop(R0_tos); - } else { - __ ldar(R0_tos, Rtemp); - } - __ verify_oop(R0_tos); - } else if (state == ftos) { - __ ldar_w(R0_tos, Rtemp); - __ fmov_sw(S0_tos, R0_tos); - } else { - ShouldNotReachHere(); - } - __ b(done); - - __ bind(notVolatile); - } - #endif // AARCH64 if (state == itos) { __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg); } else if (state == atos) { do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); } else if (state == ftos) { - #ifdef AARCH64 - __ ldr_s(S0_tos, Address(Robj, Roffset)); - #else #ifdef __SOFTFP__ __ ldr(R0_tos, Address(Robj, Roffset)); #else __ access_load_at(T_FLOAT, IN_HEAP, Address(Robj, Roffset), noreg /* ftos */, noreg, noreg, noreg); #endif // __SOFTFP__ - #endif // AARCH64 } else { ShouldNotReachHere(); } - #ifndef AARCH64 if (gen_volatile_check) { // Check for volatile load Label notVolatile; __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp); __ bind(notVolatile); } - #endif // !AARCH64 __ bind(done); } --- 3603,3637 ----
*** 4376,4386 ****
  }
  
  
  void TemplateTable::invokehandle(int byte_no) {
    transition(vtos, vtos);
  
-   // TODO-AARCH64 review register usage
    const Register Rrecv = R2_tmp;
    const Register Rmtype = R4_tmp;
    const Register R5_method = R5_tmp; // can't reuse Rmethod!
  
    prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
--- 3897,3906 ----
*** 4398,4408 ****
  }
  
  
  void TemplateTable::invokedynamic(int byte_no) {
    transition(vtos, vtos);
  
-   // TODO-AARCH64 review register usage
    const Register Rcallsite = R4_tmp;
    const Register R5_method = R5_tmp; // can't reuse Rmethod!
  
    prepare_invoke(byte_no, R5_method, Rcallsite);
--- 3918,3927 ----
*** 4454,4472 **** // This is done before loading InstanceKlass to be consistent with the order // how Constant Pool is updated (see ConstantPool::klass_at_put) const int tags_offset = Array<u1>::base_offset_in_bytes(); __ add(Rtemp, Rtags, Rindex); - #ifdef AARCH64 - __ add(Rtemp, Rtemp, tags_offset); - __ ldarb(Rtemp, Rtemp); - #else __ ldrb(Rtemp, Address(Rtemp, tags_offset)); // use Rklass as a scratch volatile_barrier(MacroAssembler::LoadLoad, Rklass); - #endif // AARCH64 // get InstanceKlass __ cmp(Rtemp, JVM_CONSTANT_Class); __ b(slow_case, ne); __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass); --- 3973,3986 ----
*** 4527,4556 **** __ ldr_literal(Rheap_top_addr, Lheap_top_addr); Label retry; __ bind(retry); - #ifdef AARCH64 - __ ldxr(Robj, Rheap_top_addr); - #else __ ldr(Robj, Address(Rheap_top_addr)); - #endif // AARCH64 __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr())); __ add(Rheap_top, Robj, Rsize); __ cmp(Rheap_top, Rheap_end); __ b(slow_case, hi); // Update heap top atomically. // If someone beats us on the allocation, try again, otherwise continue. - #ifdef AARCH64 - __ stxr(Rtemp2, Rheap_top, Rheap_top_addr); - __ cbnz_w(Rtemp2, retry); - #else __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/); __ b(retry, ne); - #endif // AARCH64 __ incr_allocated_bytes(Rsize, Rtemp); } } --- 4041,4061 ----
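
The retry loop above is a bump-pointer allocation in a shared eden: read the current top, compute top + size, take the slow path if that runs past the end, and otherwise try to install the new top atomically, retrying if another thread won the race. A minimal sketch in plain C++ atomics (illustrative only, not HotSpot code; names are made up):

    #include <atomic>
    #include <cstdint>
    #include <cstddef>

    static std::atomic<uintptr_t> heap_top;
    static uintptr_t heap_end;

    // Returns the start address of the new object, or 0 to signal "take the slow path".
    static uintptr_t allocate(size_t size) {
      uintptr_t obj = heap_top.load(std::memory_order_relaxed);
      for (;;) {
        uintptr_t new_top = obj + size;
        if (new_top > heap_end) return 0;                   // slow_case
        // compare_exchange updates 'obj' with the current top on failure, so the
        // retry recomputes new_top from the fresh value.
        if (heap_top.compare_exchange_weak(obj, new_top)) return obj;
      }
    }

    int main() {
      static char arena[1024];
      heap_top = reinterpret_cast<uintptr_t>(arena);
      heap_end = heap_top + sizeof(arena);
      return allocate(64) != 0 ? 0 : 1;
    }
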
*** 4575,4599 **** __ b(L, eq); __ stop("object size is not multiple of 8 - adjust this code"); __ bind(L); #endif - #ifdef AARCH64 - { - Label loop; - // Step back by 1 word if object size is not a multiple of 2*wordSize. - assert(wordSize <= sizeof(oopDesc), "oop header should contain at least one word"); - __ andr(Rtemp2, Rsize, (uintx)wordSize); - __ sub(Rzero_cur, Rzero_cur, Rtemp2); - - // Zero by 2 words per iteration. - __ bind(loop); - __ subs(Rsize, Rsize, 2*wordSize); - __ stp(ZR, ZR, Address(Rzero_cur, 2*wordSize, post_indexed)); - __ b(loop, gt); - } - #else __ mov(Rzero0, 0); __ mov(Rzero1, 0); __ add(Rzero_end, Rzero_cur, Rsize); // initialize remaining object fields: Rsize was a multiple of 8 --- 4080,4089 ----
*** 4606,4616 **** // #2 __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne); __ cmp(Rzero_cur, Rzero_end, ne); __ b(loop, ne); } - #endif // AARCH64 // initialize object header only. __ bind(initialize_header); if (UseBiasedLocking) { __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset())); --- 4096,4105 ----
*** 4619,4631 **** } // mark __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes())); // klass - #ifdef AARCH64 - __ store_klass_gap(Robj); - #endif // AARCH64 __ store_klass(Rklass, Robj); // blows Rklass: Rklass = noreg; // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation if (DTraceAllocProbes) { --- 4108,4117 ----
*** 4712,4734 **** __ get_cpool_and_tags(Rcpool, Rtags); __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); // See if bytecode has already been quicked __ add(Rtemp, Rtags, Rindex); - #ifdef AARCH64 - // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough - __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes()); - __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier - #else __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes())); - #endif // AARCH64 __ cmp(Rtemp, JVM_CONSTANT_Class); - #ifndef AARCH64 volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true); - #endif // !AARCH64 __ b(quicked, eq); __ push(atos); call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); --- 4198,4212 ----
*** 4792,4813 **** __ get_cpool_and_tags(Rcpool, Rtags); __ get_unsigned_2_byte_index_at_bcp(Rindex, 1); // See if bytecode has already been quicked __ add(Rtemp, Rtags, Rindex); - #ifdef AARCH64 - // TODO-AARCH64: investigate if LoadLoad barrier is needed here or control dependency is enough - __ add(Rtemp, Rtemp, Array<u1>::base_offset_in_bytes()); - __ ldarb(Rtemp, Rtemp); // acts as LoadLoad memory barrier - #else __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes())); - #endif // AARCH64 __ cmp(Rtemp, JVM_CONSTANT_Class); - #ifndef AARCH64 volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true); - #endif // !AARCH64 __ b(quicked, eq); __ push(atos); call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); --- 4270,4283 ----
*** 4859,4873 ****
    // get the unpatched byte code
    __ mov(R1, Rmethod);
    __ mov(R2, Rbcp);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
- #ifdef AARCH64
-   __ sxtw(Rtmp_save0, R0);
- #else
    __ mov(Rtmp_save0, R0);
- #endif // AARCH64
  
    // post the breakpoint event
    __ mov(R1, Rmethod);
    __ mov(R2, Rbcp);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
--- 4329,4339 ----
*** 4935,4967 **** // points to current entry, starting with top-most entry __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize); // points to word before bottom of monitor block __ cmp(Rcur, Rbottom); // check if there are no monitors - #ifndef AARCH64 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); // prefetch monitor's object for the first iteration - #endif // !AARCH64 __ b(allocate_monitor, eq); // there are no monitors, skip searching __ bind(loop); - #ifdef AARCH64 - __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes())); - #endif // AARCH64 __ cmp(Rcur_obj, 0); // check if current entry is used __ mov(Rentry, Rcur, eq); // if not used then remember entry __ cmp(Rcur_obj, Robj); // check if current entry is for same object __ b(exit, eq); // if same object then stop searching __ add(Rcur, Rcur, entry_size); // otherwise advance to next entry __ cmp(Rcur, Rbottom); // check if bottom reached - #ifndef AARCH64 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); // prefetch monitor's object for the next iteration - #endif // !AARCH64 __ b(loop, ne); // if not at bottom then check this entry __ bind(exit); } __ cbnz(Rentry, allocated); // check if a slot has been found; if found, continue with that one --- 4401,4426 ----
*** 4972,4987 **** { Label loop; assert_different_registers(Robj, Rentry, R2_tmp, Rtemp); // 1. compute new pointers - #ifdef AARCH64 - __ check_extended_sp(Rtemp); - __ sub(SP, SP, entry_size); // adjust extended SP - __ mov(Rtemp, SP); - __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize)); - #endif // AARCH64 __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize)); // old monitor block top / expression stack bottom __ sub(Rstack_top, Rstack_top, entry_size); // move expression stack top --- 4431,4440 ----
*** 4995,5019 **** // set new monitor block top // 2. move expression stack contents __ cmp(R2_tmp, Rentry); // check if expression stack is empty - #ifndef AARCH64 __ ldr(Rtemp, Address(R2_tmp, entry_size), ne); // load expression stack word from old location - #endif // !AARCH64 __ b(allocated, eq); __ bind(loop); - #ifdef AARCH64 - __ ldr(Rtemp, Address(R2_tmp, entry_size)); // load expression stack word from old location - #endif // AARCH64 __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location // and advance to next word __ cmp(R2_tmp, Rentry); // check if bottom reached - #ifndef AARCH64 __ ldr(Rtemp, Address(R2, entry_size), ne); // load expression stack word from old location - #endif // !AARCH64 __ b(loop, ne); // if not at bottom then copy next word } // call run-time routine --- 4448,4465 ----
*** 5058,5085 **** // points to current entry, starting with top-most entry __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize); // points to word before bottom of monitor block __ cmp(Rcur, Rbottom); // check if bottom reached - #ifndef AARCH64 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); // prefetch monitor's object for the first iteration - #endif // !AARCH64 __ b(throw_exception, eq); // throw exception if there are now monitors __ bind(loop); - #ifdef AARCH64 - __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes())); - #endif // AARCH64 // check if current entry is for same object __ cmp(Rcur_obj, Robj); __ b(found, eq); // if same object then stop searching __ add(Rcur, Rcur, entry_size); // otherwise advance to next entry __ cmp(Rcur, Rbottom); // check if bottom reached - #ifndef AARCH64 __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne); - #endif // !AARCH64 __ b (loop, ne); // if not at bottom then check this entry } // error handling. Unlocking was not block-structured __ bind(throw_exception); --- 4504,4524 ----