src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
rev 54117 : AArch64: 64-bit Literal Oops
*** 168,180 ****
int MacroAssembler::patch_oop(address insn_addr, address o) {
int instructions;
unsigned insn = *(unsigned*)insn_addr;
assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
! // OOPs are either narrow (32 bits) or wide (48 bits). We encode
// narrow OOPs by setting the upper 16 bits in the first
// instruction.
if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
// Move narrow OOP
narrowOop n = CompressedOops::encode((oop)o);
Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
--- 168,181 ----
int MacroAssembler::patch_oop(address insn_addr, address o) {
int instructions;
unsigned insn = *(unsigned*)insn_addr;
assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
! // OOPs are either narrow (32 bits) or wide (48 or 64 bits). We encode
// narrow OOPs by setting the upper 16 bits in the first
// instruction.
+ // 64-bit addresses are only used when Use64BitLiteralAddresses is set.
if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
// Move narrow OOP
narrowOop n = CompressedOops::encode((oop)o);
Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
*** 185,194 ****
--- 186,201 ----
uintptr_t dest = (uintptr_t)o;
Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
instructions = 3;
+
+ if (Use64BitLiteralAddresses) {
+ assert(nativeInstruction_at(insn_addr+12)->is_movk(), "wrong insns in patch");
+ Instruction_aarch64::patch(insn_addr+12, 20, 5, (dest >>= 16) & 0xffff);
+ instructions = 4;
+ }
}
return instructions * NativeInstruction::instruction_size;
}
int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
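As a concrete illustration of the patching above, here is a small standalone sketch (illustrative only; patch_imm16, patch_wide_oop and the hard-coded instruction words are assumptions, not the Instruction_aarch64 or NativeInstruction API): each MOVZ/MOVK in the sequence keeps its opcode and shift and only has its imm16 field, bits 20:5, rewritten, so a wide oop needs one extra field rewritten (the fourth movk) when Use64BitLiteralAddresses is enabled.

#include <cstdint>
#include <cstdio>

// Rewrite bits [20:5] (the imm16 field shared by MOVZ and MOVK) of one word.
static uint32_t patch_imm16(uint32_t insn, uint16_t chunk) {
  const uint32_t imm16_mask = 0xffffu << 5;        // bits 20..5
  return (insn & ~imm16_mask) | (uint32_t(chunk) << 5);
}

// Patch a wide oop into an already-emitted movz/movk sequence and return the
// patched size in bytes, mirroring patch_oop's 3-vs-4 instruction count.
static int patch_wide_oop(uint32_t* insns, uint64_t dest, bool use_64bit_literals) {
  int n = use_64bit_literals ? 4 : 3;
  for (int i = 0; i < n; i++) {
    insns[i] = patch_imm16(insns[i], uint16_t((dest >> (16 * i)) & 0xffff));
  }
  return n * 4;                                    // one AArch64 instruction is 4 bytes
}

int main() {
  // movz x0, #0 ; movk x0, #0, lsl #16 ; movk x0, #0, lsl #32 ; movk x0, #0, lsl #48
  uint32_t seq[4] = { 0xd2800000, 0xf2a00000, 0xf2c00000, 0xf2e00000 };
  int bytes = patch_wide_oop(seq, UINT64_C(0x000f0123456789ab), /*use_64bit_literals=*/true);
  printf("%d bytes: %08x %08x %08x %08x\n", bytes, seq[0], seq[1], seq[2], seq[3]);
  return 0;
}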
*** 271,286 ****
} else {
ShouldNotReachHere();
}
} else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
u_int32_t *insns = (u_int32_t *)insn_addr;
! // Move wide constant: movz, movk, movk. See movptr().
! assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
! assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
! return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+ (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
! + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
} else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
return 0;
} else {
ShouldNotReachHere();
--- 278,300 ----
} else {
ShouldNotReachHere();
}
} else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
u_int32_t *insns = (u_int32_t *)insn_addr;
! // Move wide constant: movz, movk, movk [, movk]. See movptr().
! assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch - 2nd movk missing");
! assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch - 3rd movk missing");
! u_int64_t addr = u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+ (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
! + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32);
!
! // Include the fourth movk's bits 63:48 when 64-bit literal addresses are in use.
! if (Use64BitLiteralAddresses) {
! assert(nativeInstruction_at(insns+3)->is_movk(), "wrong insns in patch - 4th movk missing");
! addr += u_int64_t(Instruction_aarch64::extract(insns[3], 20, 5)) << 48;
! }
! return (address) addr;
} else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
return 0;
} else {
ShouldNotReachHere();
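For reference, a hedged standalone sketch of the decoding direction (extract_imm16 and decode_movptr are assumed names, not the HotSpot API): the target address is reassembled from the imm16 fields of the movz, movk, movk[, movk] sequence, with the fourth chunk contributing bits 63:48 only when Use64BitLiteralAddresses is set. The example words are hand-encoded for the value 0x000f0123456789ab.

#include <cstdint>
#include <cstdio>
#include <cinttypes>

// Read the imm16 field (bits 20..5) of a MOVZ/MOVK instruction word.
static uint64_t extract_imm16(uint32_t insn) {
  return (insn >> 5) & 0xffffu;
}

// n_insns is 3 for the classic 48-bit sequence, 4 when Use64BitLiteralAddresses
// adds a fourth movk carrying bits 63:48.
static uint64_t decode_movptr(const uint32_t* insns, int n_insns) {
  uint64_t addr = 0;
  for (int i = 0; i < n_insns; i++) {
    addr += extract_imm16(insns[i]) << (16 * i);
  }
  return addr;
}

int main() {
  // movz x0, #0x89ab ; movk x0, #0x4567, lsl #16 ;
  // movk x0, #0x0123, lsl #32 ; movk x0, #0x000f, lsl #48
  const uint32_t words[4] = { 0xd2913560, 0xf2a8ace0, 0xf2c02460, 0xf2e001e0 };
  printf("%016" PRIx64 "\n", decode_movptr(words, 4));   // prints 000f0123456789ab
  return 0;
}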
*** 1485,1512 ****
code_section()->relocate(pc(), dest.rspec());
u_int64_t imm64 = (u_int64_t)dest.target();
movptr(r, imm64);
}
! // Move a constant pointer into r. In AArch64 mode the virtual
! // address space is 48 bits in size, so we only need three
! // instructions to create a patchable instruction sequence that can
! // reach anywhere.
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
{
char buffer[64];
snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
block_comment(buffer);
}
#endif
assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
movz(r, imm64 & 0xffff);
imm64 >>= 16;
movk(r, imm64 & 0xffff, 16);
imm64 >>= 16;
movk(r, imm64 & 0xffff, 32);
}
// Macro to mov replicated immediate to vector register.
// Vd will get the following values for different arrangements in T
// imm32 == hex 000000gh T8B: Vd = ghghghghghghghgh
--- 1499,1532 ----
code_section()->relocate(pc(), dest.rspec());
u_int64_t imm64 = (u_int64_t)dest.target();
movptr(r, imm64);
}
! // Move a constant pointer into r. In AArch64 mode the virtual address space
! // is 48 or 52 bits in size, so we need three or four instructions to create
! // a patchable instruction sequence that can reach anywhere.
void MacroAssembler::movptr(Register r, uintptr_t imm64) {
#ifndef PRODUCT
{
char buffer[64];
snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
block_comment(buffer);
}
#endif
+ if (!Use64BitLiteralAddresses) {
assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
+ }
movz(r, imm64 & 0xffff);
imm64 >>= 16;
movk(r, imm64 & 0xffff, 16);
imm64 >>= 16;
movk(r, imm64 & 0xffff, 32);
+
+ if (Use64BitLiteralAddresses) {
+ imm64 >>= 16;
+ movk(r, imm64 & 0xffff, 48);
+ }
}
// Macro to mov replicated immediate to vector register.
// Vd will get the following values for different arrangements in T
// imm32 == hex 000000gh T8B: Vd = ghghghghghghghgh
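As a cross-check on the emission side, a hedged standalone sketch (hand-worked encodings for the x0 case; movz_x0 and movk_x0 are illustrative helpers, not MacroAssembler calls) of the words such a movptr() sequence encodes to for an address above 2^48 when Use64BitLiteralAddresses is on: one movz for bits 15:0 followed by movk with hw = 1, 2, 3 for the higher 16-bit chunks.

#include <cstdint>
#include <cstdio>
#include <cinttypes>

// MOVZ Xd, #imm16, LSL #(16*hw): sf=1, opc=10, base word 0xd2800000.
// MOVK Xd, #imm16, LSL #(16*hw): sf=1, opc=11, base word 0xf2800000.
// hw occupies bits 22..21, imm16 bits 20..5; Rd is x0 (all zero) here.
static uint32_t movz_x0(uint16_t imm, unsigned hw) {
  return 0xd2800000u | (hw << 21) | (uint32_t(imm) << 5);
}
static uint32_t movk_x0(uint16_t imm, unsigned hw) {
  return 0xf2800000u | (hw << 21) | (uint32_t(imm) << 5);
}

int main() {
  uint64_t imm64 = UINT64_C(0x000f0123456789ab);          // uses bits 51:48, so 4 insns
  uint32_t seq[4];
  seq[0] = movz_x0(uint16_t(imm64 & 0xffff), 0);          // bits 15:0
  seq[1] = movk_x0(uint16_t((imm64 >> 16) & 0xffff), 1);  // bits 31:16
  seq[2] = movk_x0(uint16_t((imm64 >> 32) & 0xffff), 2);  // bits 47:32
  seq[3] = movk_x0(uint16_t((imm64 >> 48) & 0xffff), 3);  // bits 63:48, 64-bit literals only
  for (int i = 0; i < 4; i++) {
    printf("%08" PRIx32 "\n", seq[i]);
  }
  return 0;
}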