< prev index next >

src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

Print this page
8248238: Adding Windows support to OpenJDK on AArch64

Summary: LP64 vs LLP64 changes to add Windows support

Contributed-by: Monica Beckwith <monica.beckwith@microsoft.com>, Ludovic Henry <luhenry@microsoft.com>
Reviewed-by:


1335     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1336   } else {
1337     __ mov_metadata(k_RInfo, k->constant_encoding());
1338   }
1339   __ verify_oop(obj);
1340 
1341   if (op->fast_check()) {
1342     // get object class
1343     // not a safepoint as obj null check happens earlier
1344     __ load_klass(rscratch1, obj);
1345     __ cmp( rscratch1, k_RInfo);
1346 
1347     __ br(Assembler::NE, *failure_target);
1348     // successful cast, fall through to profile or jump
1349   } else {
1350     // get object class
1351     // not a safepoint as obj null check happens earlier
1352     __ load_klass(klass_RInfo, obj);
1353     if (k->is_loaded()) {
1354       // See if we get an immediate positive hit
1355       __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
1356       __ cmp(k_RInfo, rscratch1);
1357       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1358         __ br(Assembler::NE, *failure_target);
1359         // successful cast, fall through to profile or jump
1360       } else {
1361         // See if we get an immediate positive hit
1362         __ br(Assembler::EQ, *success_target);
1363         // check for self
1364         __ cmp(klass_RInfo, k_RInfo);
1365         __ br(Assembler::EQ, *success_target);
1366 
1367         __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1368         __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1369         __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1370         // result is a boolean
1371         __ cbzw(klass_RInfo, *failure_target);
1372         // successful cast, fall through to profile or jump
1373       }
1374     } else {
1375       // perform the fast part of the checking logic


1999     FloatRegister reg2 = opr2->as_double_reg();
2000     __ fcmpd(reg1, reg2);
2001   } else {
2002     ShouldNotReachHere();
2003   }
2004 }
2005 
2006 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
2007   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2008     bool is_unordered_less = (code == lir_ucmp_fd2i);
2009     if (left->is_single_fpu()) {
2010       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
2011     } else if (left->is_double_fpu()) {
2012       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
2013     } else {
2014       ShouldNotReachHere();
2015     }
2016   } else if (code == lir_cmp_l2i) {
2017     Label done;
2018     __ cmp(left->as_register_lo(), right->as_register_lo());
2019     __ mov(dst->as_register(), (u_int64_t)-1L);
2020     __ br(Assembler::LT, done);
2021     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2022     __ bind(done);
2023   } else {
2024     ShouldNotReachHere();
2025   }
2026 }
2027 
2028 
2029 void LIR_Assembler::align_call(LIR_Code code) {  }  // intentionally empty: no call-site alignment is done on AArch64
2030 
2031 
2032 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2033   address call = __ trampoline_call(Address(op->addr(), rtype));
2034   if (call == NULL) {
2035     bailout("trampoline stub overflow");
2036     return;
2037   }
2038   add_call_info(code_offset(), op->info());
2039 }


2658 
2659 
// AArch64 has no branch delay slots, so this should never be reached.
2660 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
2661   Unimplemented();
2662 }
2663 
2664 
// Load the in-frame address of the lock slot for monitor 'monitor_no' into dst.
2665 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2666   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2667 }
2668 
2669 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2670   assert(op->crc()->is_single_cpu(),  "crc must be register");
2671   assert(op->val()->is_single_cpu(),  "byte value must be register");
2672   assert(op->result_opr()->is_single_cpu(), "result must be register");
2673   Register crc = op->crc()->as_register();
2674   Register val = op->val()->as_register();
2675   Register res = op->result_opr()->as_register();
2676 
2677   assert_different_registers(val, crc, res);
2678   unsigned long offset;
2679   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2680   if (offset) __ add(res, res, offset);
2681 
2682   __ mvnw(crc, crc); // ~crc
2683   __ update_byte_crc32(crc, val, res);
2684   __ mvnw(res, crc); // ~crc
2685 }
2686 
2687 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2688   COMMENT("emit_profile_type {");
2689   Register obj = op->obj()->as_register();
2690   Register tmp = op->tmp()->as_pointer_register();
2691   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2692   ciKlass* exact_klass = op->exact_klass();
2693   intptr_t current_klass = op->current_klass();
2694   bool not_null = op->not_null();
2695   bool no_conflict = op->no_conflict();
2696 
2697   Label update, next, none;
2698 




1335     klass2reg_with_patching(k_RInfo, op->info_for_patch());
1336   } else {
1337     __ mov_metadata(k_RInfo, k->constant_encoding());
1338   }
1339   __ verify_oop(obj);
1340 
1341   if (op->fast_check()) {
1342     // get object class
1343     // not a safepoint as obj null check happens earlier
1344     __ load_klass(rscratch1, obj);
1345     __ cmp( rscratch1, k_RInfo);
1346 
1347     __ br(Assembler::NE, *failure_target);
1348     // successful cast, fall through to profile or jump
1349   } else {
1350     // get object class
1351     // not a safepoint as obj null check happens earlier
1352     __ load_klass(klass_RInfo, obj);
1353     if (k->is_loaded()) {
1354       // See if we get an immediate positive hit
1355       __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
1356       __ cmp(k_RInfo, rscratch1);
1357       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1358         __ br(Assembler::NE, *failure_target);
1359         // successful cast, fall through to profile or jump
1360       } else {
1361         // See if we get an immediate positive hit
1362         __ br(Assembler::EQ, *success_target);
1363         // check for self
1364         __ cmp(klass_RInfo, k_RInfo);
1365         __ br(Assembler::EQ, *success_target);
1366 
1367         __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
1368         __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1369         __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
1370         // result is a boolean
1371         __ cbzw(klass_RInfo, *failure_target);
1372         // successful cast, fall through to profile or jump
1373       }
1374     } else {
1375       // perform the fast part of the checking logic


1999     FloatRegister reg2 = opr2->as_double_reg();
2000     __ fcmpd(reg1, reg2);
2001   } else {
2002     ShouldNotReachHere();
2003   }
2004 }
2005 
2006 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
2007   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2008     bool is_unordered_less = (code == lir_ucmp_fd2i);
2009     if (left->is_single_fpu()) {
2010       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
2011     } else if (left->is_double_fpu()) {
2012       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
2013     } else {
2014       ShouldNotReachHere();
2015     }
2016   } else if (code == lir_cmp_l2i) {
2017     Label done;
2018     __ cmp(left->as_register_lo(), right->as_register_lo());
2019     __ mov(dst->as_register(), (uint64_t)-1L);
2020     __ br(Assembler::LT, done);
2021     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2022     __ bind(done);
2023   } else {
2024     ShouldNotReachHere();
2025   }
2026 }
2027 
2028 
2029 void LIR_Assembler::align_call(LIR_Code code) {  }  // intentionally empty: no call-site alignment is done on AArch64
2030 
2031 
2032 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2033   address call = __ trampoline_call(Address(op->addr(), rtype));
2034   if (call == NULL) {
2035     bailout("trampoline stub overflow");
2036     return;
2037   }
2038   add_call_info(code_offset(), op->info());
2039 }


2658 
2659 
// AArch64 has no branch delay slots, so this should never be reached.
2660 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
2661   Unimplemented();
2662 }
2663 
2664 
// Load the in-frame address of the lock slot for monitor 'monitor_no' into dst.
2665 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2666   __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2667 }
2668 
2669 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
2670   assert(op->crc()->is_single_cpu(),  "crc must be register");
2671   assert(op->val()->is_single_cpu(),  "byte value must be register");
2672   assert(op->result_opr()->is_single_cpu(), "result must be register");
2673   Register crc = op->crc()->as_register();
2674   Register val = op->val()->as_register();
2675   Register res = op->result_opr()->as_register();
2676 
2677   assert_different_registers(val, crc, res);
2678   uint64_t offset;
2679   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
2680   if (offset) __ add(res, res, offset);
2681 
2682   __ mvnw(crc, crc); // ~crc
2683   __ update_byte_crc32(crc, val, res);
2684   __ mvnw(res, crc); // ~crc
2685 }
2686 
2687 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
2688   COMMENT("emit_profile_type {");
2689   Register obj = op->obj()->as_register();
2690   Register tmp = op->tmp()->as_pointer_register();
2691   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
2692   ciKlass* exact_klass = op->exact_klass();
2693   intptr_t current_klass = op->current_klass();
2694   bool not_null = op->not_null();
2695   bool no_conflict = op->no_conflict();
2696 
2697   Label update, next, none;
2698 


< prev index next >