< prev index next >

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

Print this page
rev 53735 : AArch64 support for ValueTypes


1285 
1286   // Unspill the temp. registers:
1287   pop(pushed_registers, sp);
1288 
1289   br(Assembler::NE, *L_failure);
1290 
1291   // Success.  Cache the super we found and proceed in triumph.
1292   str(super_klass, super_cache_addr);
1293 
1294   if (L_success != &L_fallthrough) {
1295     b(*L_success);
1296   }
1297 
1298 #undef IS_A_TEMP
1299 
1300   bind(L_fallthrough);
1301 }
1302 
1303 
// Emit a runtime check that 'reg' holds a valid oop by calling the
// shared verify_oop stub (no-op unless -XX:+VerifyOops).  r0,
// rscratch1, rscratch2 and lr are saved and restored around the
// call, so 'reg' and all caller state survive.
1304 void MacroAssembler::verify_oop(Register reg, const char* s) {
1305   if (!VerifyOops) return;




1306 
1307   // Pass register number to verify_oop_subroutine
1308   const char* b = NULL;
1309   {
1310     ResourceMark rm;
1311     stringStream ss;
1312     ss.print("verify_oop: %s: %s", reg->name(), s);
     // code_string() keeps the message alive for the lifetime of the code blob.
1313     b = code_string(ss.as_string());
1314   }
1315   BLOCK_COMMENT("verify_oop {");
1316 
     // Spill the registers the stub call clobbers (two pre-indexed pairs).
1317   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1318   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1319 
     // r0 = oop to verify, rscratch1 = address of the failure message.
1320   mov(r0, reg);
1321   mov(rscratch1, (address)b);
1322 
1323   // call indirectly to solve generation ordering problem
1324   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1325   ldr(rscratch2, Address(rscratch2));
1326   blr(rscratch2);
1327 
     // Restore in reverse order of the saves above.
1328   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1329   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1330 
1331   BLOCK_COMMENT("} verify_oop");
1332 }
1333 
1334 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
1335   if (!VerifyOops) return;




1336 
1337   const char* b = NULL;
1338   {
1339     ResourceMark rm;
1340     stringStream ss;
1341     ss.print("verify_oop_addr: %s", s);
1342     b = code_string(ss.as_string());
1343   }
1344   BLOCK_COMMENT("verify_oop_addr {");
1345 
1346   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1347   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1348 
1349   // addr may contain sp so we will have to adjust it based on the
1350   // pushes that we just did.
1351   if (addr.uses(sp)) {
1352     lea(r0, addr);
1353     ldr(r0, Address(r0, 4 * wordSize));
1354   } else {
1355     ldr(r0, addr);


1418 
// call_VM_leaf: call straight into C/VM runtime code as a "leaf"
// (no Java frame bookkeeping).  Each overload stages its register
// arguments into the C calling convention via pass_argN, then
// dispatches with the matching argument count.
1419 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1420   pass_arg0(this, arg_0);
1421   call_VM_leaf_base(entry_point, 1);
1422 }
1423 
1424 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1425   pass_arg0(this, arg_0);
1426   pass_arg1(this, arg_1);
1427   call_VM_leaf_base(entry_point, 2);
1428 }
1429 
1430 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1431                                   Register arg_1, Register arg_2) {
1432   pass_arg0(this, arg_0);
1433   pass_arg1(this, arg_1);
1434   pass_arg2(this, arg_2);
1435   call_VM_leaf_base(entry_point, 3);
1436 }
1437 




// super_call_VM_leaf: like call_VM_leaf, but explicitly qualified as
// MacroAssembler::call_VM_leaf_base so a subclass override is bypassed.
1438 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1439   pass_arg0(this, arg_0);
1440   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1441 }
1442 
1443 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1444 
     // Stage arguments last-to-first so placing arg_1 cannot clobber a
     // still-needed arg_0; the assert checks the remaining hazard.
1445   assert(arg_0 != c_rarg1, "smashed arg");
1446   pass_arg1(this, arg_1);
1447   pass_arg0(this, arg_0);
1448   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1449 }
1450 
1451 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1452   assert(arg_0 != c_rarg2, "smashed arg");
1453   assert(arg_1 != c_rarg2, "smashed arg");
1454   pass_arg2(this, arg_2);
1455   assert(arg_0 != c_rarg1, "smashed arg");
1456   pass_arg1(this, arg_1);
1457   pass_arg0(this, arg_0);


1467   assert(arg_1 != c_rarg2, "smashed arg");
1468   pass_arg2(this, arg_2);
1469   assert(arg_0 != c_rarg1, "smashed arg");
1470   pass_arg1(this, arg_1);
1471   pass_arg0(this, arg_0);
1472   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1473 }
1474 
1475 void MacroAssembler::null_check(Register reg, int offset) {
1476   if (needs_explicit_null_check(offset)) {
1477     // provoke OS NULL exception if reg = NULL by
1478     // accessing M[reg] w/o changing any registers
1479     // NOTE: this is plenty to provoke a segv
1480     ldr(zr, Address(reg));
1481   } else {
1482     // nothing to do, (later) access of M[reg + offset]
1483     // will provoke OS NULL exception if reg = NULL
1484   }
1485 }
1486 

































1487 // MacroAssembler protected routines needed to implement
1488 // public methods
1489 
// Load the relocatable address 'dest' into r: record a relocation at
// the current pc, then materialize the raw target with the patchable
// movptr sequence.
1490 void MacroAssembler::mov(Register r, Address dest) {
1491   code_section()->relocate(pc(), dest.rspec());
1492   u_int64_t imm64 = (u_int64_t)dest.target();
1493   movptr(r, imm64);
1494 }
1495 
1496 // Move a constant pointer into r.  In AArch64 mode the virtual
1497 // address space is 48 bits in size, so we only need three
1498 // instructions to create a patchable instruction sequence that can
1499 // reach anywhere.
1500 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1501 #ifndef PRODUCT
1502   {
1503     char buffer[64];
1504     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1505     block_comment(buffer);
1506   }


5833 
5834 // get_thread() can be called anywhere inside generated code so we
5835 // need to save whatever non-callee save context might get clobbered
5836 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
5837 // the call setup code.
5838 //
5839 // aarch64_get_thread_helper() clobbers only r0, r1, and flags.
5840 //
5841 void MacroAssembler::get_thread(Register dst) {
     // Save r0, r1 and lr -- but never dst itself, so the result
     // written below survives the pop.
5842   RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
5843   push(saved_regs, sp);
5844 
5845   mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
5846   blrt(lr, 1, 0, 1);
     // The helper returns the current JavaThread* in c_rarg0; copy it
     // into dst if dst is a different register.
5847   if (dst != c_rarg0) {
5848     mov(dst, c_rarg0);
5849   }
5850 
5851   pop(saved_regs, sp);
5852 }









1285 
1286   // Unspill the temp. registers:
1287   pop(pushed_registers, sp);
1288 
1289   br(Assembler::NE, *L_failure);
1290 
1291   // Success.  Cache the super we found and proceed in triumph.
1292   str(super_klass, super_cache_addr);
1293 
1294   if (L_success != &L_fallthrough) {
1295     b(*L_success);
1296   }
1297 
1298 #undef IS_A_TEMP
1299 
1300   bind(L_fallthrough);
1301 }
1302 
1303 
// Emit a runtime check that 'reg' holds a valid oop by calling the
// shared verify_oop stub (no-op unless -XX:+VerifyOops, and skipped
// under VerifyAdapterSharing -- see below).  r0, rscratch1, rscratch2
// and lr are saved and restored around the call.
1304 void MacroAssembler::verify_oop(Register reg, const char* s) {
1305   if (!VerifyOops || VerifyAdapterSharing) {
1306     // Below address of the code string confuses VerifyAdapterSharing
1307     // because it may differ between otherwise equivalent adapters.
1308     return;
1309   }
1310 
1311   // Pass register number to verify_oop_subroutine
1312   const char* b = NULL;
1313   {
1314     ResourceMark rm;
1315     stringStream ss;
1316     ss.print("verify_oop: %s: %s", reg->name(), s);
     // code_string() keeps the message alive for the lifetime of the code blob.
1317     b = code_string(ss.as_string());
1318   }
1319   BLOCK_COMMENT("verify_oop {");
1320 
     // Spill the registers the stub call clobbers (two pre-indexed pairs).
1321   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1322   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1323 
     // r0 = oop to verify, rscratch1 = address of the failure message.
1324   mov(r0, reg);
1325   mov(rscratch1, (address)b);
1326 
1327   // call indirectly to solve generation ordering problem
1328   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1329   ldr(rscratch2, Address(rscratch2));
1330   blr(rscratch2);
1331 
     // Restore in reverse order of the saves above.
1332   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1333   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1334 
1335   BLOCK_COMMENT("} verify_oop");
1336 }
1337 
1338 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
1339   if (!VerifyOops || VerifyAdapterSharing) {
1340     // Below address of the code string confuses VerifyAdapterSharing
1341     // because it may differ between otherwise equivalent adapters.
1342     return;
1343   }
1344 
1345   const char* b = NULL;
1346   {
1347     ResourceMark rm;
1348     stringStream ss;
1349     ss.print("verify_oop_addr: %s", s);
1350     b = code_string(ss.as_string());
1351   }
1352   BLOCK_COMMENT("verify_oop_addr {");
1353 
1354   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1355   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1356 
1357   // addr may contain sp so we will have to adjust it based on the
1358   // pushes that we just did.
1359   if (addr.uses(sp)) {
1360     lea(r0, addr);
1361     ldr(r0, Address(r0, 4 * wordSize));
1362   } else {
1363     ldr(r0, addr);


1426 
// call_VM_leaf: call straight into C/VM runtime code as a "leaf"
// (no Java frame bookkeeping).  Each overload stages its register
// arguments into the C calling convention via pass_argN, then
// dispatches with the matching argument count.
1427 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1428   pass_arg0(this, arg_0);
1429   call_VM_leaf_base(entry_point, 1);
1430 }
1431 
1432 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1433   pass_arg0(this, arg_0);
1434   pass_arg1(this, arg_1);
1435   call_VM_leaf_base(entry_point, 2);
1436 }
1437 
1438 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1439                                   Register arg_1, Register arg_2) {
1440   pass_arg0(this, arg_0);
1441   pass_arg1(this, arg_1);
1442   pass_arg2(this, arg_2);
1443   call_VM_leaf_base(entry_point, 3);
1444 }
1445 
// super_call_VM_leaf: like call_VM_leaf, but explicitly qualified as
// MacroAssembler::call_VM_leaf_base so a subclass override is bypassed.
1446 void MacroAssembler::super_call_VM_leaf(address entry_point) {
     // NOTE(review): the argument count passed is 1 even though this
     // overload stages no register arguments -- confirm that
     // call_VM_leaf_base tolerates the over-count.
1447   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1448 }
1449 
1450 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1451   pass_arg0(this, arg_0);
1452   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1453 }
1454 
1455 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1456 
     // Stage arguments last-to-first so placing arg_1 cannot clobber a
     // still-needed arg_0; the assert checks the remaining hazard.
1457   assert(arg_0 != c_rarg1, "smashed arg");
1458   pass_arg1(this, arg_1);
1459   pass_arg0(this, arg_0);
1460   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1461 }
1462 
1463 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1464   assert(arg_0 != c_rarg2, "smashed arg");
1465   assert(arg_1 != c_rarg2, "smashed arg");
1466   pass_arg2(this, arg_2);
1467   assert(arg_0 != c_rarg1, "smashed arg");
1468   pass_arg1(this, arg_1);
1469   pass_arg0(this, arg_0);


1479   assert(arg_1 != c_rarg2, "smashed arg");
1480   pass_arg2(this, arg_2);
1481   assert(arg_0 != c_rarg1, "smashed arg");
1482   pass_arg1(this, arg_1);
1483   pass_arg0(this, arg_0);
1484   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1485 }
1486 
1487 void MacroAssembler::null_check(Register reg, int offset) {
1488   if (needs_explicit_null_check(offset)) {
1489     // provoke OS NULL exception if reg = NULL by
1490     // accessing M[reg] w/o changing any registers
1491     // NOTE: this is plenty to provoke a segv
1492     ldr(zr, Address(reg));
1493   } else {
1494     // nothing to do, (later) access of M[reg + offset]
1495     // will provoke OS NULL exception if reg = NULL
1496   }
1497 }
1498 
// Branch to 'is_value' if 'klass' is a value-type klass: load the
// 32-bit access flags, mask JVM_ACC_VALUE, and branch on non-zero.
// Clobbers temp_reg.
1499 void MacroAssembler::test_klass_is_value(Register klass, Register temp_reg, Label& is_value) {
1500   ldrw(temp_reg, Address(klass, Klass::access_flags_offset()));
1501   andr(temp_reg, temp_reg, JVM_ACC_VALUE);
1502   cbnz(temp_reg, is_value); 
1503 }
1504 
// Branch to 'is_flattenable' if the flattenable bit is set in 'flags'.
1505 void MacroAssembler::test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable) {
1506   (void) temp_reg; // keep signature uniform with x86
1507   tbnz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, is_flattenable);
1508 }
1509 
// Branch to 'not_flattenable' if the flattenable bit is clear in 'flags'.
1510 void MacroAssembler::test_field_is_not_flattenable(Register flags, Register temp_reg, Label& not_flattenable) {
1511   (void) temp_reg; // keep signature uniform with x86
1512   tbz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, not_flattenable);
1513 }
1514 
// Branch to 'is_flattened' if the flattened-field bit is set in 'flags'.
1515 void MacroAssembler::test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened) {
1516   (void) temp_reg; // keep signature uniform with x86
1517   tbnz(flags, ConstantPoolCacheEntry::is_flattened_field_shift, is_flattened);
1518 }
1519 
// Branch to 'is_flattened' if 'klass' is a flat (value-type element)
// array klass: extract the array tag from the layout helper and compare
// against the value-type array tag.  Clobbers temp_reg.
1520 void MacroAssembler::test_flat_array_klass(Register klass, Register temp_reg, Label& is_flattened) {
1521   ldrw(temp_reg, Address(klass, Klass::layout_helper_offset()));
     // Arithmetic shift: the tag lives in the (signed) top bits of the helper.
1522   asrw(temp_reg, temp_reg, Klass::_lh_array_tag_shift);
1523   cmpw(temp_reg, Klass::_lh_array_tag_vt_value);
1524   br(Assembler::EQ, is_flattened);
1525 }
1526 
// Same test as above, starting from an oop: load its klass into
// temp_reg first, then delegate.  Clobbers temp_reg.
1527 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, Label& is_flattened) {
1528   load_klass(temp_reg, oop);
1529   test_flat_array_klass(temp_reg, temp_reg, is_flattened);
1530 }
1531 
1532 // MacroAssembler protected routines needed to implement
1533 // public methods
1534 
// Load the relocatable address 'dest' into r: record a relocation at
// the current pc, then materialize the raw target with the patchable
// movptr sequence.
1535 void MacroAssembler::mov(Register r, Address dest) {
1536   code_section()->relocate(pc(), dest.rspec());
1537   u_int64_t imm64 = (u_int64_t)dest.target();
1538   movptr(r, imm64);
1539 }
1540 
1541 // Move a constant pointer into r.  In AArch64 mode the virtual
1542 // address space is 48 bits in size, so we only need three
1543 // instructions to create a patchable instruction sequence that can
1544 // reach anywhere.
1545 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1546 #ifndef PRODUCT
1547   {
1548     char buffer[64];
1549     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1550     block_comment(buffer);
1551   }


5878 
5879 // get_thread() can be called anywhere inside generated code so we
5880 // need to save whatever non-callee save context might get clobbered
5881 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
5882 // the call setup code.
5883 //
5884 // aarch64_get_thread_helper() clobbers only r0, r1, and flags.
5885 //
5886 void MacroAssembler::get_thread(Register dst) {
     // Save r0, r1 and lr -- but never dst itself, so the result
     // written below survives the pop.
5887   RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
5888   push(saved_regs, sp);
5889 
5890   mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
5891   blrt(lr, 1, 0, 1);
     // The helper returns the current JavaThread* in c_rarg0; copy it
     // into dst if dst is a different register.
5892   if (dst != c_rarg0) {
5893     mov(dst, c_rarg0);
5894   }
5895 
5896   pop(saved_regs, sp);
5897 }
5898 
5899 // DMS TODO ValueType MachVVEPNode support
// Placeholder for value-type calling-convention support on AArch64:
// always fails with a guarantee() so any attempted use of the
// verified-value entry point (MachVVEPNode) is caught immediately.
5900 void MacroAssembler::unpack_value_args(Compile* C) {
5901   // Not implemented
5902   guarantee(false, "Support for MachVVEPNode is not implemented");
5903 }
5904 
< prev index next >