src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
*** old/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Mon Sep 17 10:29:38 2018
--- new/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Mon Sep 17 10:29:38 2018

*** 125,199 ****
--- 125,166 ----
  //-------------------------------------------
  Address LIR_Assembler::as_Address(LIR_Address* addr) {
    Register base = addr->base()->as_pointer_register();
  #ifdef AARCH64
    int align = exact_log2(type2aelembytes(addr->type(), true));
  #endif
    if (addr->index()->is_illegal() || addr->index()->is_constant()) {
      int offset = addr->disp();
      if (addr->index()->is_constant()) {
        offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
      }
  #ifdef AARCH64
      if (!Assembler::is_unsigned_imm_in_range(offset, 12, align) && !Assembler::is_imm_in_range(offset, 9, 0)) {
        BAILOUT_("offset not in range", Address(base));
      }
      assert(UseUnalignedAccesses || (offset & right_n_bits(align)) == 0, "offset should be aligned");
  #else
      if ((offset <= -4096) || (offset >= 4096)) {
        BAILOUT_("offset not in range", Address(base));
      }
  #endif // AARCH64
      return Address(base, offset);
    } else {
      assert(addr->disp() == 0, "can't have both");
      int scale = addr->scale();
  #ifdef AARCH64
      assert((scale == 0) || (scale == align), "scale should be zero or equal to embedded shift");
      bool is_index_extended = (addr->index()->type() == T_INT);
      if (is_index_extended) {
        assert(addr->index()->is_single_cpu(), "should be");
        return Address(base, addr->index()->as_register(), ex_sxtw, scale);
      } else {
        assert(addr->index()->is_double_cpu(), "should be");
        return Address(base, addr->index()->as_register_lo(), ex_lsl, scale);
      }
  #else
      assert(addr->index()->is_single_cpu(), "should be");
      return scale >= 0 ? Address(base, addr->index()->as_register(), lsl, scale) :
                          Address(base, addr->index()->as_register(), lsr, -scale);
  #endif // AARCH64
    }
  }

  Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  #ifdef AARCH64
    ShouldNotCallThis(); // Not used on AArch64
    return Address();
  #else
    Address base = as_Address(addr);
    assert(base.index() == noreg, "must be");
    if (base.disp() + BytesPerWord >= 4096) {
      BAILOUT_("offset not in range", Address(base.base(),0));
    }
    return Address(base.base(), base.disp() + BytesPerWord);
  #endif // AARCH64
  }

  Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  #ifdef AARCH64
    ShouldNotCallThis(); // Not used on AArch64
    return Address();
  #else
    return as_Address(addr);
  #endif // AARCH64
  }

  void LIR_Assembler::osr_entry() {
    offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
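Note: the retained ARM32 path above folds a constant index into the displacement and bails out when the result cannot be encoded as a load/store immediate. A minimal standalone sketch of that arithmetic follows; effective_offset is a hypothetical helper for illustration only, not part of the patch, and the +/-4095 limit mirrors the range check above.

#include <cstdint>
#include <cstdlib>

// Hypothetical illustration of base + (constant index << scale) addressing:
// fold the scaled index into the displacement, then enforce the ARM32
// immediate-offset range that the BAILOUT_ above guards against.
int32_t effective_offset(int32_t disp, int32_t const_index, int scale) {
  int32_t offset = disp + (const_index << scale);
  if (offset <= -4096 || offset >= 4096) {
    std::abort();  // stands in for BAILOUT_("offset not in range", ...)
  }
  return offset;
}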
*** 325,341 ****
--- 292,303 ----
    }
    int offset = code_offset();
    __ mov_relative_address(LR, __ pc());
  #ifdef AARCH64
    __ raw_push(LR, LR);
    __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, Rtemp);
  #else
    __ push(LR); // stub expects LR to be saved
    __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
  #endif // AARCH64
    assert(code_offset() - offset <= deopt_handler_size(), "overflow");
    __ end_a_stub();
    return offset;
*** 345,355 ****
--- 307,316 ----
  void LIR_Assembler::return_op(LIR_Opr result) {
    // Pop the frame before safepoint polling
    __ remove_frame(initial_frame_size_in_bytes());

    // mov_slow here is usually one or two instruction
    // TODO-AARCH64 3 instructions on AArch64, so try to load polling page by ldr_literal
    __ mov_address(Rtemp, os::get_polling_page(), symbolic_Relocation::polling_page_reference);
    __ relocate(relocInfo::poll_return_type);
    __ ldr(Rtemp, Address(Rtemp));
    __ ret();
  }
*** 384,399 ****
--- 345,356 ----
      __ mov_slow(dest->as_register(), c->as_jint());
      break;

    case T_LONG:
      assert(patch_code == lir_patch_none, "no patching handled here");
  #ifdef AARCH64
      __ mov_slow(dest->as_pointer_register(), (intptr_t)c->as_jlong());
  #else
      __ mov_slow(dest->as_register_lo(), c->as_jint_lo());
      __ mov_slow(dest->as_register_hi(), c->as_jint_hi());
  #endif // AARCH64
      break;

    case T_OBJECT:
      if (patch_code == lir_patch_none) {
        __ mov_oop(dest->as_register(), c->as_jobject());
*** 412,441 ****
--- 369,390 ----
    case T_FLOAT:
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), c->as_jfloat());
      } else {
  #ifdef AARCH64
        ShouldNotReachHere();
  #else
        // Simple getters can return float constant directly into r0
        __ mov_slow(dest->as_register(), c->as_jint_bits());
  #endif // AARCH64
      }
      break;

    case T_DOUBLE:
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), c->as_jdouble());
      } else {
  #ifdef AARCH64
        ShouldNotReachHere();
  #else
        // Simple getters can return double constant directly into r1r0
        __ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
  #endif // AARCH64
      }
      break;

    default:
      ShouldNotReachHere();
*** 464,567 ****
--- 413,466 ----
      __ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
      break;

    case T_LONG: // fall through
    case T_DOUBLE:
  #ifdef AARCH64
      __ mov_slow(Rtemp, c->as_jlong_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix()));
  #else
      __ mov_slow(Rtemp, c->as_jint_lo_bits());
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
      if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
        __ mov_slow(Rtemp, c->as_jint_hi_bits());
      }
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  #endif // AARCH64
      break;

    default:
      ShouldNotReachHere();
    }
  }

  void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  #ifdef AARCH64
    assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == NULL) ||
           (src->as_constant_ptr()->type() == T_INT && src->as_constant_ptr()->as_jint() == 0) ||
           (src->as_constant_ptr()->type() == T_LONG && src->as_constant_ptr()->as_jlong() == 0) ||
           (src->as_constant_ptr()->type() == T_FLOAT && src->as_constant_ptr()->as_jint_bits() == 0) ||
           (src->as_constant_ptr()->type() == T_DOUBLE && src->as_constant_ptr()->as_jlong_bits() == 0),
           "cannot handle otherwise");
    assert(dest->as_address_ptr()->type() == type, "should be");

    Address addr = as_Address(dest->as_address_ptr());
    int null_check_offset = code_offset();
    switch (type) {
    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ str_w(ZR, addr);
      } else {
        __ str(ZR, addr);
      }
      break;
    case T_ADDRESS: // fall through
    case T_DOUBLE:  // fall through
    case T_LONG:    __ str(ZR, addr);   break;
    case T_FLOAT:   // fall through
    case T_INT:     __ str_w(ZR, addr); break;
    case T_BOOLEAN: // fall through
    case T_BYTE:    __ strb(ZR, addr);  break;
    case T_CHAR:    // fall through
    case T_SHORT:   __ strh(ZR, addr);  break;
    default: ShouldNotReachHere();
    }
  #else
    assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == NULL),"cannot handle otherwise");
    __ mov(Rtemp, 0);
    int null_check_offset = code_offset();
    __ str(Rtemp, as_Address(dest->as_address_ptr()));
  #endif // AARCH64
    if (info != NULL) {
  #ifndef AARCH64
      assert(false, "arm32 didn't support this before, investigate if bug");
  #endif
      add_debug_info_for_null_check(null_check_offset, info);
    }
  }

  void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
    assert(src->is_register() && dest->is_register(), "must be");

    if (src->is_single_cpu()) {
      if (dest->is_single_cpu()) {
        move_regs(src->as_register(), dest->as_register());
  #ifdef AARCH64
      } else if (dest->is_double_cpu()) {
        assert ((src->type() == T_OBJECT) || (src->type() == T_ARRAY) || (src->type() == T_ADDRESS), "invalid src type");
        move_regs(src->as_register(), dest->as_register_lo());
  #else
      } else if (dest->is_single_fpu()) {
        __ fmsr(dest->as_float_reg(), src->as_register());
  #endif // AARCH64
      } else {
        ShouldNotReachHere();
      }
    } else if (src->is_double_cpu()) {
  #ifdef AARCH64
      move_regs(src->as_register_lo(), dest->as_register_lo());
  #else
      if (dest->is_double_cpu()) {
        __ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
      } else {
        __ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
      }
  #endif // AARCH64
    } else if (src->is_single_fpu()) {
      if (dest->is_single_fpu()) {
        __ mov_float(dest->as_float_reg(), src->as_float_reg());
      } else if (dest->is_single_cpu()) {
        __ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
*** 570,584 ****
--- 469,479 ----
      }
    } else if (src->is_double_fpu()) {
      if (dest->is_double_fpu()) {
        __ mov_double(dest->as_double_reg(), src->as_double_reg());
      } else if (dest->is_double_cpu()) {
  #ifdef AARCH64
        __ fmov_xd(dest->as_register_lo(), src->as_double_reg());
  #else
        __ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
  #endif // AARCH64
      } else {
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
*** 591,606 ****
--- 486,499 ----
    Address addr = dest->is_single_word() ? frame_map()->address_for_slot(dest->single_stack_ix()) :
                                            frame_map()->address_for_slot(dest->double_stack_ix());

  #ifndef AARCH64
    assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little ending");
    if (src->is_single_fpu() || src->is_double_fpu()) {
      if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
    }
  #endif // !AARCH64

    if (src->is_single_cpu()) {
      switch (type) {
      case T_OBJECT:
      case T_ARRAY:
        __ verify_oop(src->as_register()); // fall through
*** 611,623 ****
--- 504,514 ----
      default: ShouldNotReachHere();
      }
    } else if (src->is_double_cpu()) {
      __ str(src->as_register_lo(), addr);
  #ifndef AARCH64
      __ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  #endif // !AARCH64
    } else if (src->is_single_fpu()) {
      __ str_float(src->as_float_reg(), addr);
    } else if (src->is_double_fpu()) {
      __ str_double(src->as_double_reg(), addr);
    } else {
*** 634,678 ****
--- 525,550 ----
    Register base_reg = to_addr->base()->as_pointer_register();
    const bool needs_patching = (patch_code != lir_patch_none);

    PatchingStub* patch = NULL;
    if (needs_patching) {
  #ifdef AARCH64
      // Same alignment of reg2mem code and PatchingStub code. Required to make copied bind_literal() code properly aligned.
      __ align(wordSize);
  #endif
      patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  #ifdef AARCH64
      // Extra nop for MT safe patching
      __ nop();
  #endif // AARCH64
    }

    int null_check_offset = code_offset();
    switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
  #ifdef AARCH64
        const Register temp_src = Rtemp;
        assert_different_registers(temp_src, src->as_register());
        __ encode_heap_oop(temp_src, src->as_register());
        null_check_offset = code_offset();
        __ str_32(temp_src, as_Address(to_addr));
  #else
        ShouldNotReachHere();
  #endif // AARCH64
      } else {
        __ str(src->as_register(), as_Address(to_addr));
      }
      break;

    case T_ADDRESS:
  #ifdef AARCH64
    case T_LONG:
  #endif // AARCH64
      __ str(src->as_pointer_register(), as_Address(to_addr));
      break;

    case T_BYTE:
    case T_BOOLEAN:
*** 689,709 ****
--- 561,570 ----
    case T_FLOAT:
  #endif // __SOFTFP__
      __ str_32(src->as_register(), as_Address(to_addr));
      break;

  #ifdef AARCH64
    case T_FLOAT:
      __ str_s(src->as_float_reg(), as_Address(to_addr));
      break;

    case T_DOUBLE:
      __ str_d(src->as_double_reg(), as_Address(to_addr));
      break;
  #else // AARCH64

  #ifdef __SOFTFP__
    case T_DOUBLE:
  #endif // __SOFTFP__
    case T_LONG: {
*** 763,773 ****
--- 624,633 ----
        __ fstd(src->as_double_reg(), as_Address(to_addr));
      }
      break;
  #endif // __SOFTFP__
  #endif // AARCH64

    default:
      ShouldNotReachHere();
    }
*** 791,806 ****
--- 651,664 ----
    Address addr = src->is_single_word() ? frame_map()->address_for_slot(src->single_stack_ix()) :
                                           frame_map()->address_for_slot(src->double_stack_ix());

  #ifndef AARCH64
    assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little ending");
    if (dest->is_single_fpu() || dest->is_double_fpu()) {
      if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
    }
  #endif // !AARCH64

    if (dest->is_single_cpu()) {
      switch (type) {
      case T_OBJECT:
      case T_ARRAY:
*** 814,826 ****
--- 672,682 ----
      if ((type == T_OBJECT) || (type == T_ARRAY)) {
        __ verify_oop(dest->as_register());
      }
    } else if (dest->is_double_cpu()) {
      __ ldr(dest->as_register_lo(), addr);
  #ifndef AARCH64
      __ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
  #endif // !AARCH64
    } else if (dest->is_single_fpu()) {
      __ ldr_float(dest->as_float_reg(), addr);
    } else if (dest->is_double_fpu()) {
      __ ldr_double(dest->as_double_reg(), addr);
    } else {
*** 851,866 ****
--- 707,718 ----
      }
    } else {
      assert(src->is_double_stack(), "must be");
      __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
  #ifdef AARCH64
      assert(lo_word_offset_in_bytes == 0, "adjust this code");
  #else
      __ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
      __ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
  #endif // AARCH64
    }
  }

  void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
*** 873,886 ****
--- 725,734 ----
    Register base_reg = addr->base()->as_pointer_register();

    PatchingStub* patch = NULL;
    if (patch_code != lir_patch_none) {
      patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  #ifdef AARCH64
      // Extra nop for MT safe patching
      __ nop();
  #endif // AARCH64
    }
    if (info != NULL) {
      add_debug_info_for_null_check_here(info);
    }
*** 900,917 ****
--- 748,761 ----
      } else {
        __ ldr(dest->as_pointer_register(), as_Address(addr));
      }
      break;

  #ifdef AARCH64
    case T_LONG:
  #else
    case T_INT:
  #ifdef __SOFTFP__
    case T_FLOAT:
  #endif // __SOFTFP__
  #endif // AARCH64
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
*** 927,951 ****
--- 771,780 ----
    case T_SHORT:
      __ ldrsh(dest->as_register(), as_Address(addr));
      break;

  #ifdef AARCH64
    case T_INT:
      __ ldr_w(dest->as_register(), as_Address(addr));
      break;

    case T_FLOAT:
      __ ldr_s(dest->as_float_reg(), as_Address(addr));
      break;

    case T_DOUBLE:
      __ ldr_d(dest->as_double_reg(), as_Address(addr));
      break;

  #else // AARCH64

  #ifdef __SOFTFP__
    case T_DOUBLE:
  #endif // __SOFTFP__
    case T_LONG: {
*** 1005,1015 ****
--- 834,843 ----
        __ fldd(dest->as_double_reg(), as_Address(addr));
      }
      break;
  #endif // __SOFTFP__
  #endif // AARCH64

    default:
      ShouldNotReachHere();
    }
*** 1019,1045 ****
--- 847,856 ----
      // that will deal with larger offsets.
      __ nop();
      patching_epilog(patch, patch_code, base_reg, info);
    }

  #ifdef AARCH64
    switch (type) {
    case T_ARRAY:
    case T_OBJECT:
      if (UseCompressedOops && !wide) {
        __ decode_heap_oop(dest->as_register());
      }
      __ verify_oop(dest->as_register());
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ decode_klass_not_null(dest->as_register());
      }
      break;
    }
  #endif // AARCH64
  }

  void LIR_Assembler::emit_op3(LIR_Op3* op) {
    bool is_32 = op->result_opr()->is_single_cpu();
*** 1062,1113 ****
--- 873,889 ----
        __ asr_32(dest, dest, power); // dest = dest >>> power;
      } else {
        // x/0x80000000 is a special case, since dividend is a power of two, but is negative.
        // The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
        __ cmp_32(left, c);
  #ifdef AARCH64
        __ cset(dest, eq);
  #else
        __ mov(dest, 0, ne);
        __ mov(dest, 1, eq);
  #endif // AARCH64
      }
    } else {
  #ifdef AARCH64
      Register left = op->in_opr1()->as_pointer_register();
      Register right = op->in_opr2()->as_pointer_register();
      Register dest = op->result_opr()->as_pointer_register();

      switch (op->code()) {
      case lir_idiv:
        if (is_32) {
          __ sdiv_w(dest, left, right);
        } else {
          __ sdiv(dest, left, right);
        }
        break;
      case lir_irem: {
        Register tmp = op->in_opr3()->as_pointer_register();
        assert_different_registers(left, tmp);
        assert_different_registers(right, tmp);
        if (is_32) {
          __ sdiv_w(tmp, left, right);
          __ msub_w(dest, right, tmp, left);
        } else {
          __ sdiv(tmp, left, right);
          __ msub(dest, right, tmp, left);
        }
        break;
      }
      default:
        ShouldNotReachHere();
      }
  #else
      assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
      __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
      add_debug_info_for_div0_here(op->info());
  #endif // AARCH64
    }
  }

  void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
*** 1120,1132 ****
--- 896,906 ----
  #ifdef __SOFTFP__
    assert (op->code() != lir_cond_float_branch, "this should be impossible");
  #else
    if (op->code() == lir_cond_float_branch) {
  #ifndef AARCH64
      __ fmstat();
  #endif // !AARCH64
      __ b(*(op->ublock()->label()), vs);
    }
  #endif // __SOFTFP__

    AsmCondition acond = al;
*** 1149,1164 ****
--- 923,934 ----
    LIR_Opr src = op->in_opr();
    LIR_Opr dest = op->result_opr();

    switch (op->bytecode()) {
    case Bytecodes::_i2l:
  #ifdef AARCH64
      __ sign_extend(dest->as_register_lo(), src->as_register(), 32);
  #else
      move_regs(src->as_register(), dest->as_register_lo());
      __ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
  #endif // AARCH64
      break;
    case Bytecodes::_l2i:
      move_regs(src->as_register_lo(), dest->as_register());
      break;
    case Bytecodes::_i2b:
*** 1175,1229 ****
--- 945,969 ----
      break;
    case Bytecodes::_d2f:
      __ convert_d2f(dest->as_float_reg(), src->as_double_reg());
      break;
    case Bytecodes::_i2f:
  #ifdef AARCH64
      __ scvtf_sw(dest->as_float_reg(), src->as_register());
  #else
      __ fmsr(Stemp, src->as_register());
      __ fsitos(dest->as_float_reg(), Stemp);
  #endif // AARCH64
      break;
    case Bytecodes::_i2d:
  #ifdef AARCH64
      __ scvtf_dw(dest->as_double_reg(), src->as_register());
  #else
      __ fmsr(Stemp, src->as_register());
      __ fsitod(dest->as_double_reg(), Stemp);
  #endif // AARCH64
      break;
    case Bytecodes::_f2i:
  #ifdef AARCH64
      __ fcvtzs_ws(dest->as_register(), src->as_float_reg());
  #else
      __ ftosizs(Stemp, src->as_float_reg());
      __ fmrs(dest->as_register(), Stemp);
  #endif // AARCH64
      break;
    case Bytecodes::_d2i:
  #ifdef AARCH64
      __ fcvtzs_wd(dest->as_register(), src->as_double_reg());
  #else
      __ ftosizd(Stemp, src->as_double_reg());
      __ fmrs(dest->as_register(), Stemp);
  #endif // AARCH64
      break;
  #ifdef AARCH64
    case Bytecodes::_l2f:
      __ scvtf_sx(dest->as_float_reg(), src->as_register_lo());
      break;
    case Bytecodes::_l2d:
      __ scvtf_dx(dest->as_double_reg(), src->as_register_lo());
      break;
    case Bytecodes::_f2l:
      __ fcvtzs_xs(dest->as_register_lo(), src->as_float_reg());
      break;
    case Bytecodes::_d2l:
      __ fcvtzs_xd(dest->as_register_lo(), src->as_double_reg());
      break;
  #endif // AARCH64
    default:
      ShouldNotReachHere();
    }
  }
*** 1325,1339 ****
--- 1065,1075 ----
                                                   Register obj, Register mdo, Register data_val, Label* obj_is_null) {
    assert(method != NULL, "Should have method");
    assert_different_registers(obj, mdo, data_val);
    setup_md_access(method, bci, md, data, mdo_offset_bias);
    Label not_null;
  #ifdef AARCH64
    __ cbnz(obj, not_null);
  #else
    __ b(not_null, ne);
  #endif // AARCH64
    __ mov_metadata(mdo, md->constant_encoding());
    if (mdo_offset_bias > 0) {
      __ mov_slow(data_val, mdo_offset_bias);
      __ add(mdo, mdo, data_val);
    }
*** 1371,1387 ****
--- 1107,1119 ----
    __ sub(tmp1, tmp1, DataLayout::counter_increment);
    __ str(tmp1, data_addr);
    __ b(*failure);
  }

- // Sets `res` to true, if `cond` holds. On AArch64 also sets `res` to false if `cond` does not hold.
  static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
  #ifdef AARCH64
    __ cset(res, cond);
  #else
    __ mov(res, 1, cond);
  #endif // AARCH64
  }

  void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
    // TODO: ARM - can be more effective with one more register
*** 1404,1416 ****
--- 1136,1146 ----
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
  #ifndef AARCH64
      __ cmp(value, 0);
  #endif // !AARCH64
      typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
    } else {
      __ cbz(value, done);
    }
    assert_different_registers(k_RInfo, value);
*** 1468,1528 ****
--- 1198,1207 ----
    Label profile_cast_failure, profile_cast_success;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;

  #ifdef AARCH64
    move_regs(obj, res);
    if (op->should_profile()) {
      typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
    } else {
      __ cbz(obj, done);
    }
    if (k->is_loaded()) {
      __ mov_metadata(k_RInfo, k->constant_encoding());
    } else {
      if (res != obj) {
        op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
      }
      klass2reg_with_patching(k_RInfo, op->info_for_patch());
    }
    __ load_klass(klass_RInfo, res);

    if (op->fast_check()) {
      __ cmp(klass_RInfo, k_RInfo);
      __ b(*failure_target, ne);
    } else if (k->is_loaded()) {
      __ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
      if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
        __ cmp(Rtemp, k_RInfo);
        __ b(*failure_target, ne);
      } else {
        __ cmp(klass_RInfo, k_RInfo);
        __ cond_cmp(Rtemp, k_RInfo, ne);
        __ b(*success_target, eq);
        assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
        __ cbz(R0, *failure_target);
      }
    } else {
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
      __ cmp(klass_RInfo, k_RInfo);
      __ cond_cmp(Rtemp, k_RInfo, ne);
      __ b(*success_target, eq);
      // check for immediate negative hit
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
    }

  #else // AARCH64

    __ movs(res, obj);
    if (op->should_profile()) {
      typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
    } else {
*** 1573,1583 ****
--- 1252,1261 ----
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ cbz(R0, *failure_target);
    }
  #endif // AARCH64

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
      typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1, &profile_cast_success, &profile_cast_failure,
*** 1603,1641 ****
--- 1281,1309 ----
    Label profile_cast_failure, profile_cast_success;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;

  #ifdef AARCH64
    move_regs(obj, res);
  #else
    __ movs(res, obj);
  #endif // AARCH64

    if (op->should_profile()) {
      typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
    } else {
  #ifdef AARCH64
      __ cbz(obj, done); // If obj == NULL, res is false
  #else
      __ b(done, eq);
  #endif // AARCH64
    }

    if (k->is_loaded()) {
      __ mov_metadata(k_RInfo, k->constant_encoding());
    } else {
      op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
      klass2reg_with_patching(k_RInfo, op->info_for_patch());
    }
    __ load_klass(klass_RInfo, res);

  #ifndef AARCH64
    if (!op->should_profile()) {
      __ mov(res, 0);
    }
  #endif // !AARCH64

    if (op->fast_check()) {
      __ cmp(klass_RInfo, k_RInfo);
      if (!op->should_profile()) {
        set_instanceof_result(_masm, res, eq);
*** 1669,1707 ****
--- 1337,1361 ----
    } else {
      __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      // check for immediate positive hit
      __ cmp(klass_RInfo, k_RInfo);
      if (!op->should_profile()) {
  #ifdef AARCH64
        // TODO-AARCH64 check if separate conditional branch is more efficient than ldr+cond_cmp
        __ ldr(res, Address(klass_RInfo, Rtemp));
  #else
        __ ldr(res, Address(klass_RInfo, Rtemp), ne);
  #endif // AARCH64
        __ cond_cmp(res, k_RInfo, ne);
        set_instanceof_result(_masm, res, eq);
      } else {
  #ifdef AARCH64
        // TODO-AARCH64 check if separate conditional branch is more efficient than ldr+cond_cmp
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp));
  #else
        __ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
  #endif // AARCH64
        __ cond_cmp(Rtemp, k_RInfo, ne);
      }
      __ b(*success_target, eq);
      // check for immediate negative hit
      if (op->should_profile()) {
        __ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
      }
      __ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
      if (!op->should_profile()) {
  #ifdef AARCH64
        __ mov(res, 0);
  #else
        __ mov(res, 0, ne);
  #endif // AARCH64
      }
      __ b(*failure_target, ne);
      // slow case
      assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
*** 1739,1783 ****
--- 1393,1402 ----
    //   *addr = newval;
    //   dest = 1;
    // } else {
    //   dest = 0;
    // }
  #ifdef AARCH64
    Label retry, done;
    Register addr = op->addr()->as_pointer_register();
    Register cmpval = op->cmp_value()->as_pointer_register();
    Register newval = op->new_value()->as_pointer_register();
    Register dest = op->result_opr()->as_pointer_register();
    assert_different_registers(dest, addr, cmpval, newval, Rtemp);

    if (UseCompressedOops && op->code() == lir_cas_obj) {
      Register tmp1 = op->tmp1()->as_pointer_register();
      Register tmp2 = op->tmp2()->as_pointer_register();
      assert_different_registers(dest, addr, cmpval, newval, tmp1, tmp2, Rtemp);
      __ encode_heap_oop(tmp1, cmpval);
      cmpval = tmp1;
      __ encode_heap_oop(tmp2, newval);
      newval = tmp2;
    }

    __ mov(dest, ZR);
    __ bind(retry);
    if (((op->code() == lir_cas_obj) && !UseCompressedOops) || op->code() == lir_cas_long) {
      __ ldaxr(Rtemp, addr);
      __ cmp(Rtemp, cmpval);
      __ b(done, ne);
      __ stlxr(Rtemp, newval, addr);
    } else if (((op->code() == lir_cas_obj) && UseCompressedOops) || op->code() == lir_cas_int) {
      __ ldaxr_w(Rtemp, addr);
      __ cmp_w(Rtemp, cmpval);
      __ b(done, ne);
      __ stlxr_w(Rtemp, newval, addr);
    } else {
      ShouldNotReachHere();
    }
    __ cbnz_w(Rtemp, retry);
    __ mov(dest, 1);
    __ bind(done);
  #else
    // FIXME: membar_release
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
    Register addr = op->addr()->is_register() ? op->addr()->as_pointer_register() :
                                                op->addr()->as_address_ptr()->base()->as_pointer_register();
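Note: the pseudocode comment at the top of this hunk describes compare-and-swap semantics; the ARM32 path that remains implements it with memory barriers around a ldrex/strex (or ldrexd/strexd) retry loop. A rough equivalent in portable C++ atomics is sketched below for orientation only; cas_int is a hypothetical helper, not HotSpot code.

#include <atomic>

// dest = (*addr == cmpval) ? (*addr = newval, 1) : 0, as in the comment above.
// compare_exchange_strong retries internally on LL/SC targets, which is what
// the explicit exclusive-load/store retry loop does in the assembler code.
int cas_int(std::atomic<int>* addr, int cmpval, int newval) {
  int expected = cmpval;
  bool swapped = addr->compare_exchange_strong(expected, newval,
                                               std::memory_order_seq_cst);
  return swapped ? 1 : 0;
}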
*** 1810,1820 ****
--- 1429,1438 ----
      __ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi, new_value_lo, new_value_hi, addr, 0);
    } else {
      Unimplemented();
    }
  #endif // AARCH64
    // FIXME: is full membar really needed instead of just membar_acquire?
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
  }
*** 1833,1872 ****
--- 1451,1460 ----
      case lir_cond_belowEqual: acond = ls; ncond = hi; break;
      default: ShouldNotReachHere();
      }
    }

  #ifdef AARCH64

    // TODO-AARCH64 implement it more efficiently

    if (opr1->is_register()) {
      reg2reg(opr1, result);
    } else if (opr1->is_stack()) {
      stack2reg(opr1, result, result->type());
    } else if (opr1->is_constant()) {
      const2reg(opr1, result, lir_patch_none, NULL);
    } else {
      ShouldNotReachHere();
    }

    Label skip;
    __ b(skip, acond);

    if (opr2->is_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, NULL);
    } else {
      ShouldNotReachHere();
    }

    __ bind(skip);

  #else
    for (;;) { // two iterations only
      if (opr1 == result) {
        // do nothing
      } else if (opr1->is_single_cpu()) {
        __ mov(result->as_register(), opr1->as_register(), acond);
*** 1922,1935 ****
--- 1510,1522 ----
      // Negate the condition and repeat the algorithm with the second operand
      if (opr1 == opr2) { break; }
      opr1 = opr2;
      acond = ncond;
    }
  #endif // AARCH64
  }

- #if defined(AARCH64) || defined(ASSERT)
  static int reg_size(LIR_Opr op) {
    switch (op->type()) {
    case T_FLOAT:
    case T_INT:     return BytesPerInt;
    case T_LONG:
*** 1957,1997 ****
--- 1544,1553 ----
        assert(addr->base()->as_pointer_register() == lreg &&
               addr->index()->is_register() &&
               addr->disp() == 0, "must be");
        int scale = addr->scale();
        AsmShift shift = lsl;

  #ifdef AARCH64
        bool is_index_extended = reg_size(addr->base()) > reg_size(addr->index());
        if (scale < 0) {
          scale = -scale;
          shift = lsr;
        }
        assert(shift == lsl || !is_index_extended, "could not have extend and right shift in one operand");
        assert(0 <= scale && scale <= 63, "scale is too large");

        if (is_index_extended) {
          assert(scale <= 4, "scale is too large for add with extended register");
          assert(addr->index()->is_single_cpu(), "should be");
          assert(addr->index()->type() == T_INT, "should be");
          assert(dest->is_double_cpu(), "should be");
          assert(code == lir_add, "special case of add with extended register");

          __ add(res, lreg, addr->index()->as_register(), ex_sxtw, scale);
          return;
        } else if (reg_size(dest) == BytesPerInt) {
          assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
          assert(reg_size(addr->base()) == reg_size(dest), "should be");

          AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
          switch (code) {
          case lir_add: __ add_32(res, lreg, operand); break;
          case lir_sub: __ sub_32(res, lreg, operand); break;
          default: ShouldNotReachHere();
          }
          return;
        }
  #endif // AARCH64

        assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
        assert(reg_size(addr->base()) == reg_size(dest), "should be");
        assert(reg_size(dest) == wordSize, "should be");
*** 2000,2026 ****
--- 1556,1575 ----
        case lir_add: __ add(res, lreg, operand); break;
        case lir_sub: __ sub(res, lreg, operand); break;
        default: ShouldNotReachHere();
        }

  #ifndef AARCH64
    } else if (left->is_address()) {
      assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
      const LIR_Address* addr = left->as_address_ptr();
      const Register res = dest->as_register();
      const Register rreg = right->as_register();
      assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
      __ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));
  #endif // !AARCH64

    } else if (dest->is_single_cpu()) {
      assert(left->is_single_cpu(), "unexpected left operand");
  #ifdef AARCH64
      assert(dest->type() == T_INT, "unexpected dest type");
      assert(left->type() == T_INT, "unexpected left type");
      assert(right->type() == T_INT, "unexpected right type");
  #endif // AARCH64

      const Register res = dest->as_register();
      const Register lreg = left->as_register();

      if (right->is_single_cpu()) {
*** 2043,2082 ****
--- 1592,1601 ----
        default: ShouldNotReachHere();
        }
      }

    } else if (dest->is_double_cpu()) {
  #ifdef AARCH64
      assert(left->is_double_cpu() ||
             (left->is_single_cpu() && ((left->type() == T_OBJECT) || (left->type() == T_ARRAY) || (left->type() == T_ADDRESS))),
             "unexpected left operand");

      const Register res = dest->as_register_lo();
      const Register lreg = left->as_pointer_register();

      if (right->is_constant()) {
        assert(right->type() == T_LONG, "unexpected right type");
        assert((right->as_constant_ptr()->as_jlong() >> 24) == 0, "out of range");
        jint imm = (jint)right->as_constant_ptr()->as_jlong();
        switch (code) {
        case lir_add: __ add(res, lreg, imm); break;
        case lir_sub: __ sub(res, lreg, imm); break;
        default: ShouldNotReachHere();
        }
      } else {
        assert(right->is_double_cpu() ||
               (right->is_single_cpu() && ((right->type() == T_OBJECT) || (right->type() == T_ARRAY) || (right->type() == T_ADDRESS))),
               "unexpected right operand");
        const Register rreg = right->as_pointer_register();
        switch (code) {
        case lir_add: __ add(res, lreg, rreg); break;
        case lir_sub: __ sub(res, lreg, rreg); break;
        case lir_mul: __ mul(res, lreg, rreg); break;
        default: ShouldNotReachHere();
        }
      }
  #else // AARCH64
      Register res_lo = dest->as_register_lo();
      Register res_hi = dest->as_register_hi();
      Register lreg_lo = left->as_register_lo();
      Register lreg_hi = left->as_register_hi();
      if (right->is_double_cpu()) {
*** 2116,2126 ****
--- 1635,1644 ----
        default: ShouldNotReachHere();
        }
      }
      move_regs(res_lo, dest->as_register_lo());
  #endif // AARCH64

    } else if (dest->is_single_fpu()) {
      assert(left->is_single_fpu(), "must be");
      assert(right->is_single_fpu(), "must be");
      const FloatRegister res = dest->as_float_reg();
*** 2173,2187 ****
--- 1691,1700 ----
  void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
    assert(dest->is_register(), "wrong items state");
    assert(left->is_register(), "wrong items state");

    if (dest->is_single_cpu()) {
  #ifdef AARCH64
      assert (dest->type() == T_INT, "unexpected result type");
      assert (left->type() == T_INT, "unexpected left type");
      assert (right->type() == T_INT, "unexpected right type");
  #endif // AARCH64

      const Register res = dest->as_register();
      const Register lreg = left->as_register();

      if (right->is_single_cpu()) {
*** 2204,2237 ****
--- 1717,1735 ----
      }
    } else {
      assert(dest->is_double_cpu(), "should be");
      Register res_lo = dest->as_register_lo();

  #ifdef AARCH64
      assert ((left->is_single_cpu() && left->is_oop_register()) || left->is_double_cpu(), "should be");
      const Register lreg_lo = left->as_pointer_register();
  #else
      assert (dest->type() == T_LONG, "unexpected result type");
      assert (left->type() == T_LONG, "unexpected left type");
      assert (right->type() == T_LONG, "unexpected right type");
      const Register res_hi = dest->as_register_hi();
      const Register lreg_lo = left->as_register_lo();
      const Register lreg_hi = left->as_register_hi();
  #endif // AARCH64

      if (right->is_register()) {
  #ifdef AARCH64
        assert ((right->is_single_cpu() && right->is_oop_register()) || right->is_double_cpu(), "should be");
        const Register rreg_lo = right->as_pointer_register();
        switch (code) {
        case lir_logic_and: __ andr(res_lo, lreg_lo, rreg_lo); break;
        case lir_logic_or:  __ orr (res_lo, lreg_lo, rreg_lo); break;
        case lir_logic_xor: __ eor (res_lo, lreg_lo, rreg_lo); break;
        default: ShouldNotReachHere();
        }
  #else
        const Register rreg_lo = right->as_register_lo();
        const Register rreg_hi = right->as_register_hi();
        if (res_lo == lreg_hi || res_lo == rreg_hi) {
          res_lo = Rtemp; // Temp register helps to avoid overlap between result and input
        }
*** 2250,2276 ****
--- 1748,1759 ----
          break;
        default:
          ShouldNotReachHere();
        }
        move_regs(res_lo, dest->as_register_lo());
  #endif // AARCH64
      } else {
        assert(right->is_constant(), "must be");
  #ifdef AARCH64
        const julong c = (julong)right->as_constant_ptr()->as_jlong();
        Assembler::LogicalImmediate imm(c, false);
        if (imm.is_encoded()) {
          switch (code) {
          case lir_logic_and: __ andr(res_lo, lreg_lo, imm); break;
          case lir_logic_or:  __ orr (res_lo, lreg_lo, imm); break;
          case lir_logic_xor: __ eor (res_lo, lreg_lo, imm); break;
          default: ShouldNotReachHere();
          }
        } else {
          BAILOUT("64 bit constant cannot be inlined");
        }
  #else
        const jint c_lo = (jint) right->as_constant_ptr()->as_jlong();
        const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32);
        // Case for logic_or from do_ClassIDIntrinsic()
        if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) {
          switch (code) {
*** 2301,2340 ****
--- 1784,1798 ----
            __ mov(res_hi, lreg_hi);
          }
        } else {
          BAILOUT("64 bit constant cannot be inlined");
        }
  #endif // AARCH64
      }
    }
  }

  #ifdef AARCH64

  void LIR_Assembler::long_compare_helper(LIR_Opr opr1, LIR_Opr opr2) {
    assert(opr1->is_double_cpu(), "should be");
    Register x = opr1->as_register_lo();

    if (opr2->is_double_cpu()) {
      Register y = opr2->as_register_lo();
      __ cmp(x, y);
    } else {
      assert(opr2->is_constant(), "should be");
      assert(opr2->as_constant_ptr()->type() == T_LONG, "long constant expected");
      jlong c = opr2->as_jlong();
      assert(((c >> 31) == 0) || ((c >> 31) == -1), "immediate is out of range");
      if (c >= 0) {
        __ cmp(x, (jint)c);
      } else {
        __ cmn(x, (jint)(-c));
      }
    }
  }

  #endif // AARCH64

  void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
    if (opr1->is_single_cpu()) {
      if (opr2->is_constant()) {
        switch (opr2->as_constant_ptr()->type()) {
*** 2368,2380 ****
--- 1826,1835 ----
        }
      } else {
        ShouldNotReachHere();
      }
    } else if (opr1->is_double_cpu()) {
  #ifdef AARCH64
      long_compare_helper(opr1, opr2);
  #else
      Register xlo = opr1->as_register_lo();
      Register xhi = opr1->as_register_hi();
      if (opr2->is_constant() && opr2->as_jlong() == 0) {
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
        __ orrs(Rtemp, xlo, xhi);
*** 2389,2399 ****
--- 1844,1853 ----
        __ sbcs(xhi, xhi, yhi);
      }
    } else {
      ShouldNotReachHere();
    }
  #endif // AARCH64
    } else if (opr1->is_single_fpu()) {
      if (opr2->is_constant()) {
        assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
        __ cmp_zero_float(opr1->as_float_reg());
      } else {
*** 2413,2451 ****
--- 1867,1889 ----
  void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
    const Register res = dst->as_register();
    if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
      comp_op(lir_cond_unknown, left, right, op);
  #ifdef AARCH64
      if (code == lir_ucmp_fd2i) {        // unordered is less
        __ cset(res, gt);                 // 1 if '>', else 0
        __ csinv(res, res, ZR, ge);       // previous value if '>=', else -1
      } else {
        __ cset(res, hi);                 // 1 if '>' or unordered, else 0
        __ csinv(res, res, ZR, pl);       // previous value if '>=' or unordered, else -1
      }
  #else
      __ fmstat();
      if (code == lir_ucmp_fd2i) {        // unordered is less
        __ mvn(res, 0, lt);
        __ mov(res, 1, ge);
      } else {                            // unordered is greater
        __ mov(res, 1, cs);
        __ mvn(res, 0, cc);
      }
      __ mov(res, 0, eq);
  #endif // AARCH64
    } else {
      assert(code == lir_cmp_l2i, "must be");
  #ifdef AARCH64
      long_compare_helper(left, right);
      __ cset(res, gt);                   // 1 if '>', else 0
      __ csinv(res, res, ZR, ge);         // previous value if '>=', else -1
  #else
      Label done;
      const Register xlo = left->as_register_lo();
      const Register xhi = left->as_register_hi();
      const Register ylo = right->as_register_lo();
      const Register yhi = right->as_register_hi();
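Note: lir_ucmp_fd2i and lir_cmp_fd2i above produce the Java fcmpl/fcmpg result convention (-1, 0, 1, with NaN treated as "less" or "greater" respectively). A plain C++ restatement of that convention follows, assuming ordinary doubles rather than the condition-flag sequences in the hunk; fcmpl/fcmpg here are hypothetical helpers for illustration.

// fcmpl: an unordered comparison (either operand NaN) counts as "less".
int fcmpl(double x, double y) {
  if (x > y)  return 1;
  if (x == y) return 0;
  return -1;  // x < y, or NaN involved
}

// fcmpg: an unordered comparison counts as "greater".
int fcmpg(double x, double y) {
  if (x < y)  return -1;
  if (x == y) return 0;
  return 1;   // x > y, or NaN involved
}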
*** 2455,2465 ****
--- 1893,1902 ----
      __ b(done, ne);
      __ subs(res, xlo, ylo);
      __ mov(res, 1, hi);
      __ mvn(res, 0, lo);
      __ bind(done);
  #endif // AARCH64
    }
  }

  void LIR_Assembler::align_call(LIR_Code code) {
*** 2476,2498 ****
--- 1913,1931 ----
  void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
    bool near_range = __ cache_fully_reachable();
    address oop_address = pc();

!   bool use_movw = AARCH64_ONLY(false) NOT_AARCH64(VM_Version::supports_movw());

    // Ricklass may contain something that is not a metadata pointer so
    // mov_metadata can't be used
    InlinedAddress value((address)Universe::non_oop_word());
    InlinedAddress addr(op->addr());
    if (use_movw) {
  #ifdef AARCH64
      ShouldNotReachHere();
  #else
      __ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
      __ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
  #endif // AARCH64
    } else {
      // No movw/movt, must be load a pc relative value but no
      // relocation so no metadata table to load from.
      // Use a b instruction rather than a bl, inline constant after the
      // branch, use a PC relative ldr to load the constant, arrange for
*** 2580,2618 ****
--- 2013,2022 ----
    assert(exceptionOop->as_register() == Rexception_obj, "must match");
    __ b(_unwind_handler_entry);
  }

  void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  #ifdef AARCH64
    if (dest->is_single_cpu()) {
      Register res = dest->as_register();
      Register x = left->as_register();
      Register y = count->as_register();
      assert (dest->type() == T_INT, "unexpected result type");
      assert (left->type() == T_INT, "unexpected left type");

      switch (code) {
      case lir_shl:  __ lslv_w(res, x, y); break;
      case lir_shr:  __ asrv_w(res, x, y); break;
      case lir_ushr: __ lsrv_w(res, x, y); break;
      default: ShouldNotReachHere();
      }
    } else if (dest->is_double_cpu()) {
      Register res = dest->as_register_lo();
      Register x = left->as_register_lo();
      Register y = count->as_register();

      switch (code) {
      case lir_shl:  __ lslv(res, x, y); break;
      case lir_shr:  __ asrv(res, x, y); break;
      case lir_ushr: __ lsrv(res, x, y); break;
      default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  #else
    AsmShift shift = lsl;
    switch (code) {
    case lir_shl:  shift = lsl; break;
    case lir_shr:  shift = asr; break;
    case lir_ushr: shift = lsr; break;
*** 2643,2689 ****
--- 2047,2060 ----
      move_regs(dest_lo, dest->as_register_lo());
      move_regs(dest_hi, dest->as_register_hi());
    } else {
      ShouldNotReachHere();
    }
  #endif // AARCH64
  }

  void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  #ifdef AARCH64
    if (dest->is_single_cpu()) {
      assert (dest->type() == T_INT, "unexpected result type");
      assert (left->type() == T_INT, "unexpected left type");
      count &= 31;
      if (count != 0) {
        switch (code) {
        case lir_shl:  __ _lsl_w(dest->as_register(), left->as_register(), count); break;
        case lir_shr:  __ _asr_w(dest->as_register(), left->as_register(), count); break;
        case lir_ushr: __ _lsr_w(dest->as_register(), left->as_register(), count); break;
        default: ShouldNotReachHere();
        }
      } else {
        move_regs(left->as_register(), dest->as_register());
      }
    } else if (dest->is_double_cpu()) {
      count &= 63;
      if (count != 0) {
        switch (code) {
        case lir_shl:  __ _lsl(dest->as_register_lo(), left->as_register_lo(), count); break;
        case lir_shr:  __ _asr(dest->as_register_lo(), left->as_register_lo(), count); break;
        case lir_ushr: __ _lsr(dest->as_register_lo(), left->as_register_lo(), count); break;
        default: ShouldNotReachHere();
        }
      } else {
        move_regs(left->as_register_lo(), dest->as_register_lo());
      }
    } else {
      ShouldNotReachHere();
    }
  #else
    AsmShift shift = lsl;
    switch (code) {
    case lir_shl:  shift = lsl; break;
    case lir_shr:  shift = asr; break;
    case lir_ushr: shift = lsr; break;
*** 2718,2750 ****
--- 2089,2110 ----
                       left->as_register_lo(), left->as_register_hi());
    } else {
      ShouldNotReachHere();
    }
  #endif // AARCH64
  }

  // Saves 4 given registers in reserved argument area.
  void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) {
    verify_reserved_argument_area_size(4);
  #ifdef AARCH64
    __ stp(r1, r2, Address(SP, 0));
    __ stp(r3, r4, Address(SP, 2*wordSize));
  #else
    __ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4));
  #endif // AARCH64
  }

  // Restores 4 given registers from reserved argument area.
  void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) {
  #ifdef AARCH64
    __ ldp(r1, r2, Address(SP, 0));
    __ ldp(r3, r4, Address(SP, 2*wordSize));
  #else
    __ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback);
  #endif // AARCH64
  }

  void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
    ciArrayKlass* default_type = op->expected_type();
*** 2755,2767 ****
--- 2115,2124 ----
    Register length = op->length()->as_register();
    Register tmp = op->tmp()->as_register();
    Register tmp2 = Rtemp;

    assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption");
  #ifdef AARCH64
    assert(length == R4, "code assumption");
  #endif // AARCH64

    CodeStub* stub = op->stub();

    int flags = op->flags();
    BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
*** 2771,2787 ****
--- 2128,2139 ----
    if (default_type == NULL) {
      // save arguments, because they will be killed by a runtime call
      save_in_reserved_area(R0, R1, R2, R3);

  #ifdef AARCH64
      // save length argument, will be killed by a runtime call
      __ raw_push(length, ZR);
  #else
      // pass length argument on SP[0]
      __ str(length, Address(SP, -2*wordSize, pre_indexed));  // 2 words for a proper stack alignment
  #endif // AARCH64

      address copyfunc_addr = StubRoutines::generic_arraycopy();
      assert(copyfunc_addr != NULL, "generic arraycopy stub required");
  #ifndef PRODUCT
      if (PrintC1Statistics) {
*** 2789,2803 ****
--- 2141,2151 ----
      }
  #endif // !PRODUCT

      // the stub is in the code cache so close enough
      __ call(copyfunc_addr, relocInfo::runtime_call_type);

  #ifdef AARCH64
      __ raw_pop(length, ZR);
  #else
      __ add(SP, SP, 2*wordSize);
  #endif // AARCH64

      __ cbz_32(R0, *stub->continuation());

      __ mvn_32(tmp, R0);
      restore_from_reserved_area(R0, R1, R2, R3);  // load saved arguments in slow case only
*** 2967,2977 ****
--- 2315,2325 ----
        Register src_ptr = R0;
        Register dst_ptr = R1;
        Register len     = R2;
        Register chk_off = R3;
!       Register super_k = AARCH64_ONLY(R4) NOT_AARCH64(tmp);

        __ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

        __ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
*** 2979,3002 ****
--- 2327,2341 ----
        __ load_klass(tmp, dst);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        int sco_offset = in_bytes(Klass::super_check_offset_offset());

  #ifdef AARCH64
        __ raw_push(length, ZR); // Preserve length around *copyfunc_addr call

        __ mov(len, length);
        __ ldr(super_k, Address(tmp, ek_offset)); // super_k == R4 == length, so this load cannot be performed earlier
        // TODO-AARCH64: check whether it is faster to load super klass early by using tmp and additional mov.
        __ ldr_u32(chk_off, Address(super_k, sco_offset));
  #else // AARCH64
        __ ldr(super_k, Address(tmp, ek_offset));

        __ mov(len, length);
        __ ldr_u32(chk_off, Address(super_k, sco_offset));
        __ push(super_k);
  #endif // AARCH64

        __ call(copyfunc_addr, relocInfo::runtime_call_type);

  #ifndef PRODUCT
        if (PrintC1Statistics) {
*** 3005,3019 ****
--- 2344,2354 ----
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
          __ bind(failed);
        }
  #endif // PRODUCT

  #ifdef AARCH64
        __ raw_pop(length, ZR);
  #else
        __ add(SP, SP, wordSize);  // Drop super_k argument
  #endif // AARCH64

        __ cbz_32(R0, *stub->continuation());
        __ mvn_32(tmp, R0);

        // load saved arguments in slow case only
*** 3071,3083 ****
--- 2406,2415 ----
  #ifdef ASSERT
  // emit run-time assertion
  void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
    assert(op->code() == lir_assert, "must be");

  #ifdef AARCH64
    __ NOT_IMPLEMENTED();
  #else
    if (op->in_opr1()->is_valid()) {
      assert(op->in_opr2()->is_valid(), "both operands must be valid");
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
    } else {
      assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
*** 3105,3115 ****
--- 2437,2446 ----
      __ stop(str);
    } else {
      breakpoint();
    }
    __ bind(ok);
  #endif // AARCH64
  }
  #endif // ASSERT

  void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
    fatal("CRC32 intrinsic is not implemented on this platform");
*** 3154,3164 ****
--- 2485,2495 ----
    assert(op->tmp1()->is_register(), "tmp1 must be allocated");
    Register tmp1 = op->tmp1()->as_pointer_register();
    assert_different_registers(mdo, tmp1);
    __ mov_metadata(mdo, md->constant_encoding());
    int mdo_offset_bias = 0;
!   int max_offset = AARCH64_ONLY(4096 << LogBytesPerWord) NOT_AARCH64(4096);
    if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) {
      // The offset is large so bias the mdo by the base of the slot so
      // that the ldr can use an immediate offset to reference the slots of the data
      mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
      __ mov_slow(tmp1, mdo_offset_bias);
*** 3250,3260 ****
--- 2581,2590 ----
      __ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp());
    }
  }

  void LIR_Assembler::align_backward_branch_target() {
    // TODO-AARCH64 review it
    // Some ARM processors do better with 8-byte branch target alignment
    __ align(8);
  }
*** 3263,3286 ****
--- 2593,2612 ----
    if (left->is_single_cpu()) {
      assert (dest->type() == T_INT, "unexpected result type");
      assert (left->type() == T_INT, "unexpected left type");
      __ neg_32(dest->as_register(), left->as_register());
    } else if (left->is_double_cpu()) {
  #ifdef AARCH64
      __ neg(dest->as_register_lo(), left->as_register_lo());
  #else
      Register dest_lo = dest->as_register_lo();
      Register dest_hi = dest->as_register_hi();
      Register src_lo = left->as_register_lo();
      Register src_hi = left->as_register_hi();
      if (dest_lo == src_hi) {
        dest_lo = Rtemp;
      }
      __ rsbs(dest_lo, src_lo, 0);
      __ rsc(dest_hi, src_hi, 0);
      move_regs(dest_lo, dest->as_register_lo());
  #endif // AARCH64
    } else if (left->is_single_fpu()) {
      __ neg_float(dest->as_float_reg(), left->as_float_reg());
    } else if (left->is_double_fpu()) {
      __ neg_double(dest->as_double_reg(), left->as_double_reg());
    } else {
*** 3298,3310 ****
--- 2624,2633 ----
        BAILOUT("illegal arithmetic operand");
      }
      __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c);
    } else {
      assert(addr->disp() == 0, "cannot handle otherwise");
  #ifdef AARCH64
      assert(addr->index()->is_double_cpu(), "should be");
  #endif // AARCH64
      __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(),
             AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale()));
    }
  }
*** 3317,3329 ****
--- 2640,2649 ----
    }
  }

  void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  #ifdef AARCH64
    Unimplemented(); // TODO-AARCH64: Use stlr/ldar instructions for volatile load/store
  #else
    assert(src->is_double_cpu() && dest->is_address() ||
           src->is_address() && dest->is_double_cpu(),
           "Simple move_op is called for all other cases");

    int null_check_offset;
*** 3361,3371 ****
--- 2681,2690 ----
    }

    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }
  #endif // AARCH64
  }

  void LIR_Assembler::membar() {
    __ membar(MacroAssembler::StoreLoad, Rtemp);
*** 3403,3415 ****
--- 2722,2731 ----
    // Not used on ARM
    Unimplemented();
  }

  void LIR_Assembler::peephole(LIR_List* lir) {
  #ifdef AARCH64
    return; // TODO-AARCH64 implement peephole optimizations
  #endif
    LIR_OpList* inst = lir->instructions_list();
    const int inst_length = inst->length();
    for (int i = 0; i < inst_length; i++) {
      LIR_Op* op = inst->at(i);
      switch (op->code()) {
*** 3469,3510 ****
--- 2785,2811 ----
      }
    }
  }

  void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  #ifdef AARCH64
    Register ptr = src->as_pointer_register();
  #else
    assert(src->is_address(), "sanity");
    Address addr = as_Address(src->as_address_ptr());
  #endif

    if (code == lir_xchg) {
  #ifdef AARCH64
      if (UseCompressedOops && data->is_oop()) {
        __ encode_heap_oop(tmp->as_pointer_register(), data->as_register());
      }
  #endif // AARCH64
    } else {
      assert (!data->is_oop(), "xadd for oops");
    }

  #ifndef AARCH64
    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
  #endif // !AARCH64

    Label retry;
    __ bind(retry);

!   if ((data->type() == T_INT) || (data->is_oop() AARCH64_ONLY(&& UseCompressedOops))) {
!   if (data->type() == T_INT || data->is_oop()) {
      Register dst = dest->as_register();
      Register new_val = noreg;
  #ifdef AARCH64
      __ ldaxr_w(dst, ptr);
  #else
      __ ldrex(dst, addr);
  #endif
      if (code == lir_xadd) {
        Register tmp_reg = tmp->as_register();
        if (data->is_constant()) {
          assert_different_registers(dst, tmp_reg);
          __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
*** 3519,3557 ****
--- 2820,2831 ----
        } else {
          new_val = data->as_register();
        }
        assert_different_registers(dst, new_val);
      }
  #ifdef AARCH64
      __ stlxr_w(Rtemp, new_val, ptr);
  #else
      __ strex(Rtemp, new_val, addr);
  #endif // AARCH64

  #ifdef AARCH64
    } else if ((data->type() == T_LONG) || (data->is_oop() && !UseCompressedOops)) {
      Register dst = dest->as_pointer_register();
      Register new_val = noreg;
      __ ldaxr(dst, ptr);
      if (code == lir_xadd) {
        Register tmp_reg = tmp->as_pointer_register();
        if (data->is_constant()) {
          assert_different_registers(dst, ptr, tmp_reg);
          jlong c = data->as_constant_ptr()->as_jlong();
          assert((jlong)((jint)c) == c, "overflow");
          __ add(tmp_reg, dst, (jint)c);
        } else {
          assert_different_registers(dst, ptr, tmp_reg, data->as_pointer_register());
          __ add(tmp_reg, dst, data->as_pointer_register());
        }
        new_val = tmp_reg;
      } else {
        new_val = data->as_pointer_register();
        assert_different_registers(dst, ptr, new_val);
      }
      __ stlxr(Rtemp, new_val, ptr);
  #else
    } else if (data->type() == T_LONG) {
      Register dst_lo = dest->as_register_lo();
      Register new_val_lo = noreg;
      Register dst_hi = dest->as_register_hi();
*** 3588,3608 ****
--- 2862,2876 ----
        assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi);
        assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair");
        assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair");
      }
      __ strexd(Rtemp, new_val_lo, addr);
  #endif // AARCH64
    } else {
      ShouldNotReachHere();
    }

    __ cbnz_32(Rtemp, retry);

    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);

  #ifdef AARCH64
    if (UseCompressedOops && data->is_oop()) {
      __ decode_heap_oop(dest->as_register());
    }
  #endif // AARCH64
  }

  #undef __
