1335 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1336 } else {
1337 __ mov_metadata(k_RInfo, k->constant_encoding());
1338 }
1339 __ verify_oop(obj);
1340
1341 if (op->fast_check()) {
1342 // get object class
1343 // not a safepoint as obj null check happens earlier
1344 __ load_klass(rscratch1, obj);
1345 __ cmp( rscratch1, k_RInfo);
1346
1347 __ br(Assembler::NE, *failure_target);
1348 // successful cast, fall through to profile or jump
1349 } else {
1350 // get object class
1351 // not a safepoint as obj null check happens earlier
1352 __ load_klass(klass_RInfo, obj);
1353 if (k->is_loaded()) {
1354 // See if we get an immediate positive hit
1355 __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
1356 __ cmp(k_RInfo, rscratch1);
1357 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1358 __ br(Assembler::NE, *failure_target);
1359 // successful cast, fall through to profile or jump
1360 } else {
1361 // See if we get an immediate positive hit
1362 __ br(Assembler::EQ, *success_target);
1363 // check for self
1364 __ cmp(klass_RInfo, k_RInfo);
1365 __ br(Assembler::EQ, *success_target);
1366
1367 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1368 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1369 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1370 // result is a boolean
1371 __ cbzw(klass_RInfo, *failure_target);
1372 // successful cast, fall through to profile or jump
1373 }
1374 } else {
1375 // perform the fast part of the checking logic
1999 FloatRegister reg2 = opr2->as_double_reg();
2000 __ fcmpd(reg1, reg2);
2001 } else {
2002 ShouldNotReachHere();
2003 }
2004 }
2005
2006 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
2007 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2008 bool is_unordered_less = (code == lir_ucmp_fd2i);
2009 if (left->is_single_fpu()) {
2010 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
2011 } else if (left->is_double_fpu()) {
2012 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
2013 } else {
2014 ShouldNotReachHere();
2015 }
2016 } else if (code == lir_cmp_l2i) {
2017 Label done;
2018 __ cmp(left->as_register_lo(), right->as_register_lo());
2019 __ mov(dst->as_register(), (u_int64_t)-1L);
2020 __ br(Assembler::LT, done);
2021 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2022 __ bind(done);
2023 } else {
2024 ShouldNotReachHere();
2025 }
2026 }
2027
2028
// Intentionally a no-op on AArch64: calls are emitted via trampoline stubs
// (see LIR_Assembler::call below), so no call-site alignment is needed here.
void LIR_Assembler::align_call(LIR_Code code) { }
2030
2031
2032 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2033 address call = __ trampoline_call(Address(op->addr(), rtype));
2034 if (call == NULL) {
2035 bailout("trampoline stub overflow");
2036 return;
2037 }
2038 add_call_info(code_offset(), op->info());
2039 }
2267 // expects them.
2268 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2269 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2270 __ ldr(src, Address(sp, 4*BytesPerWord));
2271
2272 // r0 is -1^K where K == partial copied count
2273 __ eonw(rscratch1, r0, zr);
2274 // adjust length down and src/end pos up by partial copied count
2275 __ subw(length, length, rscratch1);
2276 __ addw(src_pos, src_pos, rscratch1);
2277 __ addw(dst_pos, dst_pos, rscratch1);
2278 __ b(*stub->entry());
2279
2280 __ bind(*stub->continuation());
2281 return;
2282 }
2283
2284 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2285
2286 int elem_size = type2aelembytes(basic_type);
2287 int shift_amount;
2288 int scale = exact_log2(elem_size);
2289
2290 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2291 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2292 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2293 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2294
2295 // test for NULL
2296 if (flags & LIR_OpArrayCopy::src_null_check) {
2297 __ cbz(src, *stub->entry());
2298 }
2299 if (flags & LIR_OpArrayCopy::dst_null_check) {
2300 __ cbz(dst, *stub->entry());
2301 }
2302
2303 // If the compiler was not able to prove that exact type of the source or the destination
2304 // of the arraycopy is an array type, check at runtime if the source or the destination is
2305 // an instance type.
2306 if (flags & LIR_OpArrayCopy::type_check) {
2307 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
2658
2659
// AArch64 has no branch delay slots, so LIR_OpDelay should never be emitted
// for this platform; reaching here indicates a front-end bug.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
2663
2664
// Load into dst the address of the in-frame lock slot for monitor
// 'monitor_no' (as laid out by the frame map).
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}
2668
2669 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2670 assert(op->crc()->is_single_cpu(), "crc must be register");
2671 assert(op->val()->is_single_cpu(), "byte value must be register");
2672 assert(op->result_opr()->is_single_cpu(), "result must be register");
2673 Register crc = op->crc()->as_register();
2674 Register val = op->val()->as_register();
2675 Register res = op->result_opr()->as_register();
2676
2677 assert_different_registers(val, crc, res);
2678 unsigned long offset;
2679 __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2680 if (offset) __ add(res, res, offset);
2681
2682 __ mvnw(crc, crc); // ~crc
2683 __ update_byte_crc32(crc, val, res);
2684 __ mvnw(res, crc); // ~crc
2685 }
2686
2687 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2688 COMMENT("emit_profile_type {");
2689 Register obj = op->obj()->as_register();
2690 Register tmp = op->tmp()->as_pointer_register();
2691 Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2692 ciKlass* exact_klass = op->exact_klass();
2693 intptr_t current_klass = op->current_klass();
2694 bool not_null = op->not_null();
2695 bool no_conflict = op->no_conflict();
2696
2697 Label update, next, none;
2698
|
1335 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1336 } else {
1337 __ mov_metadata(k_RInfo, k->constant_encoding());
1338 }
1339 __ verify_oop(obj);
1340
1341 if (op->fast_check()) {
1342 // get object class
1343 // not a safepoint as obj null check happens earlier
1344 __ load_klass(rscratch1, obj);
1345 __ cmp( rscratch1, k_RInfo);
1346
1347 __ br(Assembler::NE, *failure_target);
1348 // successful cast, fall through to profile or jump
1349 } else {
1350 // get object class
1351 // not a safepoint as obj null check happens earlier
1352 __ load_klass(klass_RInfo, obj);
1353 if (k->is_loaded()) {
1354 // See if we get an immediate positive hit
1355 __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1356 __ cmp(k_RInfo, rscratch1);
1357 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1358 __ br(Assembler::NE, *failure_target);
1359 // successful cast, fall through to profile or jump
1360 } else {
1361 // See if we get an immediate positive hit
1362 __ br(Assembler::EQ, *success_target);
1363 // check for self
1364 __ cmp(klass_RInfo, k_RInfo);
1365 __ br(Assembler::EQ, *success_target);
1366
1367 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1368 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1369 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1370 // result is a boolean
1371 __ cbzw(klass_RInfo, *failure_target);
1372 // successful cast, fall through to profile or jump
1373 }
1374 } else {
1375 // perform the fast part of the checking logic
1999 FloatRegister reg2 = opr2->as_double_reg();
2000 __ fcmpd(reg1, reg2);
2001 } else {
2002 ShouldNotReachHere();
2003 }
2004 }
2005
2006 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
2007 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2008 bool is_unordered_less = (code == lir_ucmp_fd2i);
2009 if (left->is_single_fpu()) {
2010 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
2011 } else if (left->is_double_fpu()) {
2012 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
2013 } else {
2014 ShouldNotReachHere();
2015 }
2016 } else if (code == lir_cmp_l2i) {
2017 Label done;
2018 __ cmp(left->as_register_lo(), right->as_register_lo());
2019 __ mov(dst->as_register(), (uint64_t)-1L);
2020 __ br(Assembler::LT, done);
2021 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2022 __ bind(done);
2023 } else {
2024 ShouldNotReachHere();
2025 }
2026 }
2027
2028
// Intentionally a no-op on AArch64: calls go through trampoline stubs
// (see LIR_Assembler::call below), so no call-site alignment is required.
void LIR_Assembler::align_call(LIR_Code code) { }
2030
2031
2032 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2033 address call = __ trampoline_call(Address(op->addr(), rtype));
2034 if (call == NULL) {
2035 bailout("trampoline stub overflow");
2036 return;
2037 }
2038 add_call_info(code_offset(), op->info());
2039 }
2267 // expects them.
2268 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2269 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2270 __ ldr(src, Address(sp, 4*BytesPerWord));
2271
2272 // r0 is -1^K where K == partial copied count
2273 __ eonw(rscratch1, r0, zr);
2274 // adjust length down and src/end pos up by partial copied count
2275 __ subw(length, length, rscratch1);
2276 __ addw(src_pos, src_pos, rscratch1);
2277 __ addw(dst_pos, dst_pos, rscratch1);
2278 __ b(*stub->entry());
2279
2280 __ bind(*stub->continuation());
2281 return;
2282 }
2283
2284 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2285
2286 int elem_size = type2aelembytes(basic_type);
2287 int scale = exact_log2(elem_size);
2288
2289 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2290 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2291 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2292 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2293
2294 // test for NULL
2295 if (flags & LIR_OpArrayCopy::src_null_check) {
2296 __ cbz(src, *stub->entry());
2297 }
2298 if (flags & LIR_OpArrayCopy::dst_null_check) {
2299 __ cbz(dst, *stub->entry());
2300 }
2301
2302 // If the compiler was not able to prove that exact type of the source or the destination
2303 // of the arraycopy is an array type, check at runtime if the source or the destination is
2304 // an instance type.
2305 if (flags & LIR_OpArrayCopy::type_check) {
2306 if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
2657
2658
// AArch64 has no branch delay slots, so LIR_OpDelay should never be emitted
// for this platform; reaching here indicates a front-end bug.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
2662
2663
// Load into dst the address of the in-frame lock slot for monitor
// 'monitor_no' (as laid out by the frame map).
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}
2667
// Update a CRC-32 accumulator with a single byte value using the shared
// CRC lookup table. crc, val and res must all be (distinct) registers;
// res receives the updated CRC.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
  // adrp yields a page-aligned base with the low bits returned in 'offset';
  // add them back in when non-zero to form the full table address.
  uint64_t offset;
  __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
  if (offset) __ add(res, res, offset);

  // CRC-32 works on the bitwise complement of the accumulator.
  __ mvnw(crc, crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ mvnw(res, crc); // ~crc
}
2685
2686 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2687 COMMENT("emit_profile_type {");
2688 Register obj = op->obj()->as_register();
2689 Register tmp = op->tmp()->as_pointer_register();
2690 Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2691 ciKlass* exact_klass = op->exact_klass();
2692 intptr_t current_klass = op->current_klass();
2693 bool not_null = op->not_null();
2694 bool no_conflict = op->no_conflict();
2695
2696 Label update, next, none;
2697
|