src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

1292 
1293   // Unspill the temp. registers:
1294   pop(pushed_registers, sp);
1295 
1296   br(Assembler::NE, *L_failure);
1297 
1298   // Success.  Cache the super we found and proceed in triumph.
1299   str(super_klass, super_cache_addr);
1300 
1301   if (L_success != &L_fallthrough) {
1302     b(*L_success);
1303   }
1304 
1305 #undef IS_A_TEMP
1306 
1307   bind(L_fallthrough);
1308 }
1309 
1310 
1311 void MacroAssembler::verify_oop(Register reg, const char* s) {
1312   if (!VerifyOops) return;
1313 
1314   // Pass register number to verify_oop_subroutine
1315   const char* b = NULL;
1316   {
1317     ResourceMark rm;
1318     stringStream ss;
1319     ss.print("verify_oop: %s: %s", reg->name(), s);
1320     b = code_string(ss.as_string());
1321   }
1322   BLOCK_COMMENT("verify_oop {");
1323 
1324   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1325   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1326 
1327   mov(r0, reg);
1328   mov(rscratch1, (address)b);
1329 
1330   // call indirectly to solve generation ordering problem
1331   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1332   ldr(rscratch2, Address(rscratch2));
1333   blr(rscratch2);
1334 
1335   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1336   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1337 
1338   BLOCK_COMMENT("} verify_oop");
1339 }
1340 
1341 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
1342   if (!VerifyOops) return;
1343 
1344   const char* b = NULL;
1345   {
1346     ResourceMark rm;
1347     stringStream ss;
1348     ss.print("verify_oop_addr: %s", s);
1349     b = code_string(ss.as_string());
1350   }
1351   BLOCK_COMMENT("verify_oop_addr {");
1352 
1353   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1354   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1355 
1356   // addr may contain sp so we will have to adjust it based on the
1357   // pushes that we just did.
1358   if (addr.uses(sp)) {
1359     lea(r0, addr);
1360     ldr(r0, Address(r0, 4 * wordSize));
1361   } else {
1362     ldr(r0, addr);


1425 
1426 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1427   pass_arg0(this, arg_0);
1428   call_VM_leaf_base(entry_point, 1);
1429 }
1430 
1431 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1432   pass_arg0(this, arg_0);
1433   pass_arg1(this, arg_1);
1434   call_VM_leaf_base(entry_point, 2);
1435 }
1436 
1437 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1438                                   Register arg_1, Register arg_2) {
1439   pass_arg0(this, arg_0);
1440   pass_arg1(this, arg_1);
1441   pass_arg2(this, arg_2);
1442   call_VM_leaf_base(entry_point, 3);
1443 }
1444 
1445 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1446   pass_arg0(this, arg_0);
1447   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1448 }
1449 
1450 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1451 
1452   assert(arg_0 != c_rarg1, "smashed arg");
1453   pass_arg1(this, arg_1);
1454   pass_arg0(this, arg_0);
1455   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1456 }
1457 
1458 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1459   assert(arg_0 != c_rarg2, "smashed arg");
1460   assert(arg_1 != c_rarg2, "smashed arg");
1461   pass_arg2(this, arg_2);
1462   assert(arg_0 != c_rarg1, "smashed arg");
1463   pass_arg1(this, arg_1);
1464   pass_arg0(this, arg_0);


1474   assert(arg_1 != c_rarg2, "smashed arg");
1475   pass_arg2(this, arg_2);
1476   assert(arg_0 != c_rarg1, "smashed arg");
1477   pass_arg1(this, arg_1);
1478   pass_arg0(this, arg_0);
1479   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1480 }
1481 
1482 void MacroAssembler::null_check(Register reg, int offset) {
1483   if (needs_explicit_null_check(offset)) {
1484     // provoke OS NULL exception if reg = NULL by
1485     // accessing M[reg] w/o changing any registers
1486     // NOTE: this is plenty to provoke a segv
1487     ldr(zr, Address(reg));
1488   } else {
1489     // nothing to do, (later) access of M[reg + offset]
1490     // will provoke OS NULL exception if reg = NULL
1491   }
1492 }
1493 
1494 // MacroAssembler protected routines needed to implement
1495 // public methods
1496 
1497 void MacroAssembler::mov(Register r, Address dest) {
1498   code_section()->relocate(pc(), dest.rspec());
1499   u_int64_t imm64 = (u_int64_t)dest.target();
1500   movptr(r, imm64);
1501 }
1502 
1503 // Move a constant pointer into r.  In AArch64 mode the virtual
1504 // address space is 48 bits in size, so we only need three
1505 // instructions to create a patchable instruction sequence that can
1506 // reach anywhere.
1507 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1508 #ifndef PRODUCT
1509   {
1510     char buffer[64];
1511     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1512     block_comment(buffer);
1513   }


3666     adr = Address(rscratch2);
3667     break;
3668   }
3669   ldr(rscratch1, adr);
3670   add(rscratch1, rscratch1, src);
3671   str(rscratch1, adr);
3672 }
3673 
3674 void MacroAssembler::cmpptr(Register src1, Address src2) {
3675   unsigned long offset;
3676   adrp(rscratch1, src2, offset);
3677   ldr(rscratch1, Address(rscratch1, offset));
3678   cmp(src1, rscratch1);
3679 }
3680 
3681 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
3682   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3683   bs->obj_equals(this, obj1, obj2);
3684 }
3685 
3686 void MacroAssembler::load_klass(Register dst, Register src) {
3687   if (UseCompressedClassPointers) {
3688     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3689     decode_klass_not_null(dst);
3690   } else {
3691     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3692   }
3693 }
3694 
3695 // ((OopHandle)result).resolve();
3696 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
3697   // OopHandle::resolve is an indirection.
3698   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg);
3699 }
3700 
3701 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
3702   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3703   ldr(dst, Address(rmethod, Method::const_offset()));
3704   ldr(dst, Address(dst, ConstMethod::constants_offset()));
3705   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
3706   ldr(dst, Address(dst, mirror_offset));
3707   resolve_oop_handle(dst, tmp);
3708 }
3709 
3710 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
3711   if (UseCompressedClassPointers) {
3712     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3713     if (CompressedKlassPointers::base() == NULL) {
3714       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
3715       return;
3716     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
3717                && CompressedKlassPointers::shift() == 0) {
3718       // Only the bottom 32 bits matter
3719       cmpw(trial_klass, tmp);
3720       return;
3721     }
3722     decode_klass_not_null(tmp);
3723   } else {
3724     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3725   }
3726   cmp(trial_klass, tmp);
3727 }
3728 
3729 void MacroAssembler::load_prototype_header(Register dst, Register src) {


4007   narrowKlass nk = CompressedKlassPointers::encode(k);
4008   movz(dst, (nk >> 16), 16);
4009   movk(dst, nk & 0xffff);
4010 }
4011 
4012 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4013                                     Register dst, Address src,
4014                                     Register tmp1, Register thread_tmp) {
4015   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4016   decorators = AccessInternal::decorator_fixup(decorators);
4017   bool as_raw = (decorators & AS_RAW) != 0;
4018   if (as_raw) {
4019     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4020   } else {
4021     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4022   }
4023 }
4024 
4025 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4026                                      Address dst, Register src,
4027                                      Register tmp1, Register thread_tmp) {
4028   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4029   decorators = AccessInternal::decorator_fixup(decorators);
4030   bool as_raw = (decorators & AS_RAW) != 0;
4031   if (as_raw) {
4032     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4033   } else {
4034     bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4035   }
4036 }
4037 
4038 void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
4039   // Use stronger ACCESS_WRITE|ACCESS_READ by default.
4040   if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
4041     decorators |= ACCESS_READ | ACCESS_WRITE;
4042   }
4043   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4044   return bs->resolve(this, decorators, obj);
4045 }
4046 
4047 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
4048                                    Register thread_tmp, DecoratorSet decorators) {
4049   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4050 }
4051 
4052 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
4053                                             Register thread_tmp, DecoratorSet decorators) {
4054   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
4055 }
4056 
4057 void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
4058                                     Register thread_tmp, DecoratorSet decorators) {
4059   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4060 }
4061 
4062 // Used for storing NULLs.
4063 void MacroAssembler::store_heap_oop_null(Address dst) {
4064   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg);
4065 }
4066 
4067 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
4068   assert(oop_recorder() != NULL, "this assembler needs a Recorder");
4069   int index = oop_recorder()->allocate_metadata_index(obj);
4070   RelocationHolder rspec = metadata_Relocation::spec(index);
4071   return Address((address)obj, rspec);
4072 }
4073 
4074 // Move an oop into a register.  immediate is true if we want
4075 // immediate instructions, i.e. we are not going to patch this
4076 // instruction while the code is being executed by another thread.  In
4077 // that case we can use move immediates rather than the constant pool.
4078 void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
4079   int oop_index;
4080   if (obj == NULL) {
4081     oop_index = oop_recorder()->allocate_oop_index(obj);
4082   } else {
4083 #ifdef ASSERT
4084     {


5847 }
5848 
5849 // get_thread() can be called anywhere inside generated code so we
5850 // need to save whatever non-callee save context might get clobbered
5851 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
5852 // the call setup code.
5853 //
5854 // aarch64_get_thread_helper() clobbers only r0, r1, and flags.
5855 //
5856 void MacroAssembler::get_thread(Register dst) {
5857   RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
5858   push(saved_regs, sp);
5859 
5860   mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
5861   blrt(lr, 1, 0, 1);
5862   if (dst != c_rarg0) {
5863     mov(dst, c_rarg0);
5864   }
5865 
5866   pop(saved_regs, sp);
5867 }


1292 
1293   // Unspill the temp. registers:
1294   pop(pushed_registers, sp);
1295 
1296   br(Assembler::NE, *L_failure);
1297 
1298   // Success.  Cache the super we found and proceed in triumph.
1299   str(super_klass, super_cache_addr);
1300 
1301   if (L_success != &L_fallthrough) {
1302     b(*L_success);
1303   }
1304 
1305 #undef IS_A_TEMP
1306 
1307   bind(L_fallthrough);
1308 }
1309 
1310 
1311 void MacroAssembler::verify_oop(Register reg, const char* s) {
1312   if (!VerifyOops || VerifyAdapterSharing) {
1313     // The code string's address (below) confuses VerifyAdapterSharing,
1314     // because it may differ between otherwise equivalent adapters.
1315     return;
1316   }
1317 
1318   // Pass register number to verify_oop_subroutine
1319   const char* b = NULL;
1320   {
1321     ResourceMark rm;
1322     stringStream ss;
1323     ss.print("verify_oop: %s: %s", reg->name(), s);
1324     b = code_string(ss.as_string());
1325   }
1326   BLOCK_COMMENT("verify_oop {");
1327 
1328   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1329   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1330 
1331   mov(r0, reg);
1332   mov(rscratch1, (address)b);
1333 
1334   // call indirectly to solve generation ordering problem
1335   lea(rscratch2, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
1336   ldr(rscratch2, Address(rscratch2));
1337   blr(rscratch2);
1338 
1339   ldp(rscratch2, lr, Address(post(sp, 2 * wordSize)));
1340   ldp(r0, rscratch1, Address(post(sp, 2 * wordSize)));
1341 
1342   BLOCK_COMMENT("} verify_oop");
1343 }
1344 
1345 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
1346   if (!VerifyOops || VerifyAdapterSharing) {
1347     // The code string's address (below) confuses VerifyAdapterSharing,
1348     // because it may differ between otherwise equivalent adapters.
1349     return;
1350   }
1351 
1352   const char* b = NULL;
1353   {
1354     ResourceMark rm;
1355     stringStream ss;
1356     ss.print("verify_oop_addr: %s", s);
1357     b = code_string(ss.as_string());
1358   }
1359   BLOCK_COMMENT("verify_oop_addr {");
1360 
1361   stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
1362   stp(rscratch2, lr, Address(pre(sp, -2 * wordSize)));
1363 
1364   // addr may contain sp so we will have to adjust it based on the
1365   // pushes that we just did.
1366   if (addr.uses(sp)) {
1367     lea(r0, addr);
1368     ldr(r0, Address(r0, 4 * wordSize));
1369   } else {
1370     ldr(r0, addr);
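
A side note on the 4 * wordSize fix-up above: the two pre-indexed stp pushes move sp down by four words, so an sp-relative operand computed before the pushes now points four words too low. A minimal standalone sketch of the arithmetic (plain C++, not HotSpot code):

    #include <cstdint>
    #include <cstdio>
    int main() {
      const int64_t wordSize = 8;
      int64_t sp = 1000;                  // pretend stack pointer
      int64_t slot = sp + 16;             // sp-relative operand, pre-push
      sp -= 2 * wordSize;                 // stp(r0, rscratch1, pre(sp, -16))
      sp -= 2 * wordSize;                 // stp(rscratch2, lr,  pre(sp, -16))
      // the original slot is now found 4 * wordSize above the new sp base
      printf("%d\n", sp + 16 + 4 * wordSize == slot);   // prints 1
      return 0;
    }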


1433 
1434 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
1435   pass_arg0(this, arg_0);
1436   call_VM_leaf_base(entry_point, 1);
1437 }
1438 
1439 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1440   pass_arg0(this, arg_0);
1441   pass_arg1(this, arg_1);
1442   call_VM_leaf_base(entry_point, 2);
1443 }
1444 
1445 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0,
1446                                   Register arg_1, Register arg_2) {
1447   pass_arg0(this, arg_0);
1448   pass_arg1(this, arg_1);
1449   pass_arg2(this, arg_2);
1450   call_VM_leaf_base(entry_point, 3);
1451 }
1452 
1453 void MacroAssembler::super_call_VM_leaf(address entry_point) {
1454   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1455 }
1456 
1457 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
1458   pass_arg0(this, arg_0);
1459   MacroAssembler::call_VM_leaf_base(entry_point, 1);
1460 }
1461 
1462 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
1463 
1464   assert(arg_0 != c_rarg1, "smashed arg");
1465   pass_arg1(this, arg_1);
1466   pass_arg0(this, arg_0);
1467   MacroAssembler::call_VM_leaf_base(entry_point, 2);
1468 }
1469 
1470 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
1471   assert(arg_0 != c_rarg2, "smashed arg");
1472   assert(arg_1 != c_rarg2, "smashed arg");
1473   pass_arg2(this, arg_2);
1474   assert(arg_0 != c_rarg1, "smashed arg");
1475   pass_arg1(this, arg_1);
1476   pass_arg0(this, arg_0);


1486   assert(arg_1 != c_rarg2, "smashed arg");
1487   pass_arg2(this, arg_2);
1488   assert(arg_0 != c_rarg1, "smashed arg");
1489   pass_arg1(this, arg_1);
1490   pass_arg0(this, arg_0);
1491   MacroAssembler::call_VM_leaf_base(entry_point, 4);
1492 }
1493 
1494 void MacroAssembler::null_check(Register reg, int offset) {
1495   if (needs_explicit_null_check(offset)) {
1496     // provoke OS NULL exception if reg = NULL by
1497     // accessing M[reg] w/o changing any registers
1498     // NOTE: this is plenty to provoke a segv
1499     ldr(zr, Address(reg));
1500   } else {
1501     // nothing to do, (later) access of M[reg + offset]
1502     // will provoke OS NULL exception if reg = NULL
1503   }
1504 }
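
The predicate behind null_check is worth spelling out: a load through a NULL reg only traps reliably while reg + offset stays inside the unmapped page at address zero. A hedged sketch of that logic (the real needs_explicit_null_check lives elsewhere in HotSpot; the 4K page size is an assumption):

    #include <cstdio>
    static const long kOsPageSize = 4096;   // assumed page size
    static bool needs_explicit_null_check_sketch(long offset) {
      // only accesses inside [0, page) fault for free on a NULL base
      return offset < 0 || offset >= kOsPageSize;
    }
    int main() {
      printf("%d %d\n",
             needs_explicit_null_check_sketch(8),        // 0: implicit check ok
             needs_explicit_null_check_sketch(1 << 20)); // 1: explicit probe
      return 0;
    }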
1505 
1506 void MacroAssembler::test_klass_is_value(Register klass, Register temp_reg, Label& is_value) {
1507   ldrw(temp_reg, Address(klass, Klass::access_flags_offset()));
1508   andr(temp_reg, temp_reg, JVM_ACC_VALUE);
1509   cbnz(temp_reg, is_value);
1510 }
1511 
1512 void MacroAssembler::test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable) {
1513   (void) temp_reg; // keep signature uniform with x86
1514   tbnz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, is_flattenable);
1515 }
1516 
1517 void MacroAssembler::test_field_is_not_flattenable(Register flags, Register temp_reg, Label& not_flattenable) {
1518   (void) temp_reg; // keep signature uniform with x86
1519   tbz(flags, ConstantPoolCacheEntry::is_flattenable_field_shift, not_flattenable);
1520 }
1521 
1522 void MacroAssembler::test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened) {
1523   (void) temp_reg; // keep signature uniform with x86
1524   tbnz(flags, ConstantPoolCacheEntry::is_flattened_field_shift, is_flattened);
1525 }
1526 
1527 void MacroAssembler::test_flattened_array_oop(Register oop, Register temp_reg, Label& is_flattened_array) {
1528   load_storage_props(temp_reg, oop);
1529   andr(temp_reg, temp_reg, ArrayStorageProperties::flattened_value);
1530   cbnz(temp_reg, is_flattened_array);
1531 }
1532 
1533 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array) {
1534   load_storage_props(temp_reg, oop);
1535   andr(temp_reg, temp_reg, ArrayStorageProperties::null_free_value);
1536   cbnz(temp_reg, is_null_free_array);
1537 }
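
All of the test_* helpers above reduce to testing one bit of a flags word and branching, which is what tbnz/tbz (and the andr/cbnz pairs) encode. A standalone illustration (the bit position is hypothetical, not the real ConstantPoolCacheEntry layout):

    #include <cstdint>
    #include <cstdio>
    int main() {
      const int is_flattenable_field_shift = 5;        // hypothetical bit
      uint32_t flags = 1u << is_flattenable_field_shift;
      if (flags & (1u << is_flattenable_field_shift))  // tbnz flags, #5, L
        puts("branch taken: field is flattenable");
      return 0;
    }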
1538 
1539 // MacroAssembler protected routines needed to implement
1540 // public methods
1541 
1542 void MacroAssembler::mov(Register r, Address dest) {
1543   code_section()->relocate(pc(), dest.rspec());
1544   u_int64_t imm64 = (u_int64_t)dest.target();
1545   movptr(r, imm64);
1546 }
1547 
1548 // Move a constant pointer into r.  In AArch64 mode the virtual
1549 // address space is 48 bits in size, so we only need three
1550 // instructions to create a patchable instruction sequence that can
1551 // reach anywhere.
1552 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
1553 #ifndef PRODUCT
1554   {
1555     char buffer[64];
1556     snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
1557     block_comment(buffer);
1558   }
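
The three-instruction sequence the comment above promises is movz plus two movk, each carrying one 16-bit halfword of the 48-bit address. A standalone illustration (plain C++, not HotSpot code) of how the halfwords reassemble:

    #include <cstdint>
    #include <cstdio>
    int main() {
      uint64_t imm64 = 0x00007f12deadbeefULL;     // any 48-bit VA
      uint64_t r;
      r  =  imm64         & 0xffff;               // movz r, #h0
      r |= ((imm64 >> 16) & 0xffff) << 16;        // movk r, #h1, lsl #16
      r |= ((imm64 >> 32) & 0xffff) << 32;        // movk r, #h2, lsl #32
      printf("%d\n", r == imm64);                 // prints 1
      return 0;
    }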


3711     adr = Address(rscratch2);
3712     break;
3713   }
3714   ldr(rscratch1, adr);
3715   add(rscratch1, rscratch1, src);
3716   str(rscratch1, adr);
3717 }
3718 
3719 void MacroAssembler::cmpptr(Register src1, Address src2) {
3720   unsigned long offset;
3721   adrp(rscratch1, src2, offset);
3722   ldr(rscratch1, Address(rscratch1, offset));
3723   cmp(src1, rscratch1);
3724 }
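
cmpptr relies on the usual adrp split: adrp materializes the 4K-aligned page containing the target, and the low 12 bits ride along as the load offset. A standalone sketch of the decomposition (not HotSpot code):

    #include <cstdint>
    #include <cstdio>
    int main() {
      uint64_t target = 0x00007f12deadbeefULL;
      uint64_t page   = target & ~0xfffULL;       // what adrp produces
      uint64_t offset = target &  0xfffULL;       // folded into the ldr
      printf("%d\n", page + offset == target);    // prints 1
      return 0;
    }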
3725 
3726 void MacroAssembler::cmpoop(Register obj1, Register obj2) {
3727   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3728   bs->obj_equals(this, obj1, obj2);
3729 }
3730 
3731 void MacroAssembler::load_metadata(Register dst, Register src) {
3732   if (UseCompressedClassPointers) {
3733     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));

3734   } else {
3735     ldr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
3736   }
3737 }
3738 
3739 void MacroAssembler::load_klass(Register dst, Register src) {
3740   load_metadata(dst, src);
3741   if (UseCompressedClassPointers) {
3742     andr(dst, dst, oopDesc::compressed_klass_mask());
3743     decode_klass_not_null(dst);
3744   } else {
3745     ubfm(dst, dst, 0, 63 - oopDesc::storage_props_nof_bits);
3746   }
3747 }
3748 
3749 // ((OopHandle)result).resolve();
3750 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
3751   // OopHandle::resolve is an indirection.
3752   access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp, noreg);
3753 }
3754 
3755 void MacroAssembler::load_mirror(Register dst, Register method, Register tmp) {
3756   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
3757   ldr(dst, Address(rmethod, Method::const_offset()));
3758   ldr(dst, Address(dst, ConstMethod::constants_offset()));
3759   ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
3760   ldr(dst, Address(dst, mirror_offset));
3761   resolve_oop_handle(dst, tmp);
3762 }
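
load_mirror is four dependent loads plus the OopHandle indirection. A self-contained model of the chain (types and field names simplified; the real offsets come from the in_bytes(...) constants above):

    #include <cstdio>
    struct OopHandleModel    { void** loc; void* resolve() const { return *loc; } };
    struct KlassModel        { OopHandleModel java_mirror; };
    struct ConstantPoolModel { KlassModel* pool_holder; };
    struct ConstMethodModel  { ConstantPoolModel* constants; };
    struct MethodModel       { ConstMethodModel* const_method; };
    static void* load_mirror_model(MethodModel* m) {
      return m->const_method->constants->pool_holder->java_mirror.resolve();
    }
    int main() {
      int mirror = 42;                     // stand-in for the mirror oop
      void* slot = &mirror;                // the handle's backing slot
      KlassModel k         = { { &slot } };
      ConstantPoolModel cp = { &k };
      ConstMethodModel cm  = { &cp };
      MethodModel m        = { &cm };
      printf("%d\n", load_mirror_model(&m) == &mirror);   // prints 1
      return 0;
    }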
3763 
3764 void MacroAssembler::load_storage_props(Register dst, Register src) {
3765   load_metadata(dst, src);
3766   if (UseCompressedClassPointers) {
3767     asrw(dst, dst, oopDesc::narrow_storage_props_shift);
3768   } else {
3769     asr(dst, dst, oopDesc::wide_storage_props_shift);
3770   }
3771 }
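
Together, load_klass and load_storage_props split one header word: the storage-property bits live above the (possibly compressed) klass pointer, so one caller masks them off and the other shifts them down. A standalone sketch of the split (the 3-bit width is an assumption; oopDesc defines the real masks and shifts):

    #include <cstdint>
    #include <cstdio>
    int main() {
      const int kPropsBits = 3;                         // assumed width
      const uint64_t kKlassMask = ~0ULL >> kPropsBits;
      uint64_t word  = (0x5ULL << (64 - kPropsBits)) | 0x123456ULL;
      uint64_t klass = word & kKlassMask;               // load_klass path
      uint64_t props = word >> (64 - kPropsBits);       // load_storage_props
      printf("klass=%llx props=%llx\n",
             (unsigned long long)klass, (unsigned long long)props);
      return 0;
    }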
3772 
3773 void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
3774   if (UseCompressedClassPointers) {
3775     ldrw(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3776     if (CompressedKlassPointers::base() == NULL) {
3777       cmp(trial_klass, tmp, LSL, CompressedKlassPointers::shift());
3778       return;
3779     } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
3780                && CompressedKlassPointers::shift() == 0) {
3781       // Only the bottom 32 bits matter
3782       cmpw(trial_klass, tmp);
3783       return;
3784     }
3785     decode_klass_not_null(tmp);
3786   } else {
3787     ldr(tmp, Address(oop, oopDesc::klass_offset_in_bytes()));
3788   }
3789   cmp(trial_klass, tmp);
3790 }
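
Both fast paths in cmp_klass follow from the compressed-klass encoding narrow = (klass - base) >> shift. With a zero base, shifting the narrow value up reproduces the full pointer, so the compare can apply LSL instead of decoding; with a 4GB-aligned base and zero shift, the narrow value equals the low 32 bits of the full pointer, so a 32-bit compare suffices. A standalone check of both identities (not HotSpot code):

    #include <cstdint>
    #include <cstdio>
    int main() {
      // base == 0: klass == (uint64_t)narrow << shift
      uint64_t base = 0, shift = 3, klass = 0x12345678ULL << 3;
      uint32_t narrow = (uint32_t)((klass - base) >> shift);
      printf("%d\n", klass == ((uint64_t)narrow << shift));   // prints 1

      // base 4GB-aligned, shift == 0: low 32 bits of klass == narrow
      base = 0x200000000ULL; shift = 0; klass = base + 0x9abcdef0ULL;
      narrow = (uint32_t)((klass - base) >> shift);
      printf("%d\n", (uint32_t)klass == narrow);              // prints 1
      return 0;
    }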
3791 
3792 void MacroAssembler::load_prototype_header(Register dst, Register src) {


4070   narrowKlass nk = CompressedKlassPointers::encode(k);
4071   movz(dst, (nk >> 16), 16);
4072   movk(dst, nk & 0xffff);
4073 }
4074 
4075 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4076                                     Register dst, Address src,
4077                                     Register tmp1, Register thread_tmp) {
4078   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4079   decorators = AccessInternal::decorator_fixup(decorators);
4080   bool as_raw = (decorators & AS_RAW) != 0;
4081   if (as_raw) {
4082     bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4083   } else {
4084     bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
4085   }
4086 }
4087 
4088 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4089                                      Address dst, Register src,
4090                                      Register tmp1, Register thread_tmp, Register tmp3) {
4091   BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
4092   decorators = AccessInternal::decorator_fixup(decorators);
4093   bool as_raw = (decorators & AS_RAW) != 0;
4094   if (as_raw) {
4095     bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4096   } else {
4097     bs->store_at(this, decorators, type, dst, src, tmp1, thread_tmp, tmp3);
4098   }
4099 }
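
The as_raw branch in both accessors uses a C++ qualified call, bs->BarrierSetAssembler::load_at(...), which selects the base-class body statically and skips any GC-specific override. A plain C++ demonstration of that dispatch trick (not HotSpot code):

    #include <cstdio>
    struct BarrierBase {
      virtual void store_at() { puts("raw store"); }
    };
    struct GcBarrier : BarrierBase {
      void store_at() override { puts("store with GC barrier"); }
    };
    int main() {
      GcBarrier g;
      BarrierBase* bs = &g;
      bs->store_at();                // virtual: "store with GC barrier"
      bs->BarrierBase::store_at();   // qualified: "raw store" (the AS_RAW case)
      return 0;
    }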
4100 
4101 void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
4102   // Use stronger ACCESS_WRITE|ACCESS_READ by default.
4103   if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
4104     decorators |= ACCESS_READ | ACCESS_WRITE;
4105   }
4106   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4107   return bs->resolve(this, decorators, obj);
4108 }
4109 
4110 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
4111                                    Register thread_tmp, DecoratorSet decorators) {
4112   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
4113 }
4114 
4115 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
4116                                             Register thread_tmp, DecoratorSet decorators) {
4117   access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
4118 }
4119 
4120 void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
4121                                     Register thread_tmp, Register tmp3, DecoratorSet decorators) {
4122   access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp, tmp3);
4123 }
4124 
4125 // Used for storing NULLs.
4126 void MacroAssembler::store_heap_oop_null(Address dst) {
4127   access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg);
4128 }
4129 
4130 Address MacroAssembler::allocate_metadata_address(Metadata* obj) {
4131   assert(oop_recorder() != NULL, "this assembler needs a Recorder");
4132   int index = oop_recorder()->allocate_metadata_index(obj);
4133   RelocationHolder rspec = metadata_Relocation::spec(index);
4134   return Address((address)obj, rspec);
4135 }
4136 
4137 // Move an oop into a register.  immediate is true if we want
4138 // immediate instructions, i.e. we are not going to patch this
4139 // instruction while the code is being executed by another thread.  In
4140 // that case we can use move immediates rather than the constant pool.
4141 void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
4142   int oop_index;
4143   if (obj == NULL) {
4144     oop_index = oop_recorder()->allocate_oop_index(obj);
4145   } else {
4146 #ifdef ASSERT
4147     {


5910 }
5911 
5912 // get_thread() can be called anywhere inside generated code so we
5913 // need to save whatever non-callee save context might get clobbered
5914 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed,
5915 // the call setup code.
5916 //
5917 // aarch64_get_thread_helper() clobbers only r0, r1, and flags.
5918 //
5919 void MacroAssembler::get_thread(Register dst) {
5920   RegSet saved_regs = RegSet::range(r0, r1) + lr - dst;
5921   push(saved_regs, sp);
5922 
5923   mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper));
5924   blrt(lr, 1, 0, 1);
5925   if (dst != c_rarg0) {
5926     mov(dst, c_rarg0);
5927   }
5928 
5929   pop(saved_regs, sp);
5930 }
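
The RegSet expression above reads as set arithmetic: the caller-saved pair r0..r1 plus lr, minus the destination register (which may legitimately be clobbered). A hedged model of that arithmetic as a bitmask, one bit per register (RegSet's real representation is an assumption here):

    #include <cstdint>
    #include <cstdio>
    static uint32_t reg_range(int lo, int hi) {
      uint32_t m = 0;
      for (int i = lo; i <= hi; i++) m |= 1u << i;
      return m;
    }
    int main() {
      const int r0 = 0, r1 = 1, lr = 30;
      int dst = r0;                                       // suppose dst is r0
      uint32_t saved = (reg_range(r0, r1) | 1u << lr) & ~(1u << dst);
      printf("%08x\n", saved);                            // r1 and lr remain
      return 0;
    }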
5931 
5932 // C2 compiled method's prolog code
5933 // Moved here from aarch64.ad to support the Valhalla code below
5934 void MacroAssembler::verified_entry(Compile* C, int sp_inc) {
5935 
5936   // n.b. frame size includes space for return pc and rfp
5937   const long framesize = C->frame_size_in_bytes();
5938   assert(framesize % (2 * wordSize) == 0, "must preserve 2 * wordSize alignment");
5939 
5940   // insert a nop at the start of the prolog so we can patch in a
5941   // branch if we need to invalidate the method later
5942   nop();
5943 
5944   int bangsize = C->bang_size_in_bytes();
5945   if (C->need_stack_bang(bangsize) && UseStackBanging)
5946      generate_stack_overflow_check(bangsize);
5947 
5948   build_frame(framesize);
5949 
5950   if (NotifySimulator) {
5951     notify(Assembler::method_entry);
5952   }
5953 
5954   if (VerifyStackAtCalls) {
5955     Unimplemented();
5956   }
5957 }
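
The alignment assert in verified_entry encodes the AArch64 ABI rule that sp stays 16-byte aligned: with an 8-byte wordSize, "2 * wordSize" is exactly that 16-byte requirement, and the frame already counts the saved pc and rfp. A tiny standalone check mirroring it:

    #include <cassert>
    int main() {
      const long wordSize = 8;
      long framesize = 96;                      // example; includes pc and rfp
      assert(framesize % (2 * wordSize) == 0);  // keep sp 16-byte aligned
      return 0;
    }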
5958 
5959 void MacroAssembler::unpack_value_args(Compile* C, bool receiver_only) {
5960   // Called from MachVEP node
5961   unimplemented("Support for ValueTypePassFieldsAsArgs and ValueTypeReturnedAsFields is not implemented");
5962 }
5963 
5964 void MacroAssembler::store_value_type_fields_to_buf(ciValueKlass* vk) {
5965   super_call_VM_leaf(StubRoutines::store_value_type_fields_to_buf());
5966 }