# HG changeset patch
# User smonteith
# Date 1528882388 -3600
#      Wed Jun 13 10:33:08 2018 +0100
# Node ID 8af3786ecfaa45511d2b675d01f4a79ec2f6a366
# Parent  d21803f4741668794a2fd5746d3dcd1895254480
AArch64: 64-bit Literal Oops

diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1031,7 +1031,7 @@
   }
 
   static uint size_deopt_handler() {
-    // count one adr and one far branch instruction
+    // adr, adrp, add, br
     return 4 * NativeInstruction::instruction_size;
   }
 };
@@ -1491,7 +1491,11 @@
 
 int MachCallDynamicJavaNode::ret_addr_offset()
 {
-  return 16; // movz, movk, movk, bl
+  if (!Use64BitLiteralAddresses) {
+    return 16; // movz, movk, movk, bl
+  } else {
+    return 20; // movz, movk, movk, movk, bl
+  }
 }
 
 int MachCallRuntimeNode::ret_addr_offset() {
@@ -1507,7 +1511,12 @@
   if (cb) {
     return MacroAssembler::far_branch_size();
   } else {
-    return 6 * NativeInstruction::instruction_size;
+    // lea will emit 3 instructions, or an extra movk to make 4.
+    if (!Use64BitLiteralAddresses) {
+      return 6 * NativeInstruction::instruction_size;
+    } else {
+      return 7 * NativeInstruction::instruction_size;
+    }
   }
 }
 
diff --git a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
@@ -248,7 +248,13 @@
 // - in runtime: preserve all registers (rspecially objects, i.e., source and destination object)
 // - in runtime: after initializing class, restore original code, reexecute instruction
 
-int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
+// On AArch64 we can generate literal addresses of either 48 or 64 bits,
+// using 3 or 4 instructions, switchable via the Use64BitLiteralAddresses
+// option. The value returned is therefore not a compile-time constant,
+// unlike on other platforms.
+int PatchingStub::patch_info_offset() {
+  return -NativeGeneralJump::instruction_size;
+}
 
 void PatchingStub::align_patch_site(MacroAssembler* masm) {
 }
 
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -69,7 +69,7 @@
   void deoptimize_trap(CodeEmitInfo *info);
 
   enum {
-    _call_stub_size = 12 * NativeInstruction::instruction_size,
+    _call_stub_size = 14 * NativeInstruction::instruction_size,
     _call_aot_stub_size = 0,
     _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
     _deopt_handler_size = 7 * NativeInstruction::instruction_size
diff --git a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
@@ -77,7 +77,12 @@
 #undef __
 
 int CompiledStaticCall::to_interp_stub_size() {
-  return 7 * NativeInstruction::instruction_size;
+  if (!Use64BitLiteralAddresses) {
+    return 7 * NativeInstruction::instruction_size;
+  } else {
+    // emit_to_interp_stub will emit 2 extra movk instructions.
+    return 9 * NativeInstruction::instruction_size;
+  }
 }
 
 int CompiledStaticCall::to_trampoline_stub_size() {
diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -162,7 +162,9 @@
   product(int, SoftwarePrefetchHintDistance, -1,                      \
          "Use prfm hint with specified distance in compiled code."    \
          "Value -1 means off.")                                       \
-         range(-1, 4096)
+         range(-1, 4096)                                              \
+  experimental(bool, Use64BitLiteralAddresses, false,                 \
+         "Use 64 bit literal addresses instead of 48 bit.")
 
 #endif
 
diff --git a/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp b/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp
@@ -34,6 +34,7 @@
 #include "oops/oop.inline.hpp"
 
 int InlineCacheBuffer::ic_stub_code_size() {
+  // Total size of: ldr, (adrp, add, br | b), int64.
   return (MacroAssembler::far_branches() ? 6 : 4) * NativeInstruction::instruction_size;
 }
 
diff --git a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp
@@ -53,9 +53,18 @@
     assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
            nativeInstruction_at(pc+4)->is_movk(), "wrong insn in patch");
   } else {
-    // Move wide constant: movz n, movk, movk.
-    assert(nativeInstruction_at(pc+4)->is_movk()
-           && nativeInstruction_at(pc+8)->is_movk(), "wrong insn in patch");
+    if (!Use64BitLiteralAddresses) {
+      // Move wide constant: movz n, movk, movk.
+      assert(nativeInstruction_at(pc+4)->is_movk()
+             && nativeInstruction_at(pc+8)->is_movk(), "wrong insn in patch");
+    } else {
+      // Move wide constant: movz n, movk, movk, movk.
+      // JVMCI would require the code generated for it to use 64-bit literals,
+      // so Use64BitLiteralAddresses is disabled until that support is added.
+      assert(nativeInstruction_at(pc+4)->is_movk()
+             && nativeInstruction_at(pc+8)->is_movk()
+             && nativeInstruction_at(pc+12)->is_movk(), "wrong insn in patch");
+    }
   }
 }
 #endif // ASSERT
@@ -105,6 +114,8 @@
     jump->set_jump_destination((address) foreign_call_destination);
     _instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
   } else if (inst->is_general_jump()) {
+    // mov, movk, movk
+    assert(!Use64BitLiteralAddresses, "64 bit literals not supported.");
     NativeGeneralJump* jump = nativeGeneralJump_at(pc);
     jump->set_jump_destination((address) foreign_call_destination);
     _instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -170,9 +170,10 @@
   unsigned insn = *(unsigned*)insn_addr;
   assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
 
-  // OOPs are either narrow (32 bits) or wide (48 bits).  We encode
+  // OOPs are either narrow (32 bits) or wide (48 or 64 bits).  We encode
   // narrow OOPs by setting the upper 16 bits in the first
   // instruction.
+  // 64-bit addresses are only enabled with Use64BitLiteralAddresses set.
   if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
     // Move narrow OOP
     narrowOop n = CompressedOops::encode((oop)o);
@@ -187,6 +188,12 @@
     Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
     Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
     instructions = 3;
+
+    if (Use64BitLiteralAddresses) {
+      assert(nativeInstruction_at(insn_addr+12)->is_movk(), "wrong insns in patch");
+      Instruction_aarch64::patch(insn_addr+12, 20, 5, (dest >>= 16) & 0xffff);
+      instructions = 4;
+    }
   }
   return instructions * NativeInstruction::instruction_size;
 }
@@ -273,12 +280,19 @@
     }
   } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
     u_int32_t *insns = (u_int32_t *)insn_addr;
-    // Move wide constant: movz, movk, movk.  See movptr().
-    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
-    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
-    return address(u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+    // Move wide constant: movz, movk, movk [, movk].  See movptr().
+    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch - 2nd movk missing");
+    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch - 3rd movk missing");
+    u_int64_t addr = u_int64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                    + (u_int64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
-                   + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
+                   + (u_int64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32);
+
+    // Allow for the extra movk of a possible 64-bit address.
+    if (Use64BitLiteralAddresses) {
+      assert(nativeInstruction_at(insns+3)->is_movk(), "wrong insns in patch - 4th movk missing.");
+      addr += u_int64_t(Instruction_aarch64::extract(insns[3], 20, 5)) << 48;
+    }
+    return (address) addr;
   } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
              Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
     return 0;
@@ -1487,10 +1501,9 @@
   movptr(r, imm64);
 }
 
-// Move a constant pointer into r.  In AArch64 mode the virtual
-// address space is 48 bits in size, so we only need three
-// instructions to create a patchable instruction sequence that can
-// reach anywhere.
+// Move a constant pointer into r.  In AArch64 mode the virtual address space
+// is 48 or 52 bits in size, so we need three or four instructions to create
+// a patchable instruction sequence that can reach anywhere.
 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
 #ifndef PRODUCT
   {
@@ -1499,12 +1512,19 @@
     block_comment(buffer);
   }
 #endif
-  assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
+  if (!Use64BitLiteralAddresses) {
+    assert(imm64 < (1ul << 48), "48-bit overflow in address constant");
+  }
   movz(r, imm64 & 0xffff);
   imm64 >>= 16;
   movk(r, imm64 & 0xffff, 16);
   imm64 >>= 16;
   movk(r, imm64 & 0xffff, 32);
+
+  if (Use64BitLiteralAddresses) {
+    imm64 >>= 16;
+    movk(r, imm64 & 0xffff, 48);
+  }
 }
 
 // Macro to mov replicated immediate to vector register.
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -231,6 +231,24 @@
 
 //-------------------------------------------------------------------
 
+int NativeMovConstReg::get_instruction_size() const {
+  if (nativeInstruction_at(instruction_address())->is_movz()) {
+    // See movptr().
+    if (!Use64BitLiteralAddresses) {
+      // movz, movk, movk.
+      return 3 * NativeInstruction::instruction_size;
+    } else {
+      return 4 * NativeInstruction::instruction_size;
+    }
+  } else if (is_adrp_at(instruction_address())) {
+    return 2 * NativeInstruction::instruction_size;
+  } else if (is_ldr_literal_at(instruction_address())) {
+    return NativeInstruction::instruction_size;
+  }
+  assert(false, "Unknown instruction in NativeMovConstReg");
+  return 0;
+}
+
 void NativeMovConstReg::verify() {
   // make sure code pattern is actually mov reg64, imm64 instructions
 }
@@ -425,8 +443,18 @@
       NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
       if (inst2->is_movk()) {
         NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
-        if (inst3->is_blr()) {
-          return true;
+
+        if (Use64BitLiteralAddresses) {
+          if (inst3->is_movk()) {
+            NativeInstruction* inst4 = nativeInstruction_at(addr_at(instruction_size * 4));
+            if (inst4->is_blr()) {
+              return true;
+            }
+          }
+        } else {
+          if (inst3->is_blr()) {
+            return true;
+          }
         }
       }
     }
@@ -485,6 +513,9 @@
   ICache::invalidate_range(verified_entry, instruction_size);
 }
 
+
+int NativeGeneralJump::instruction_size;
+
 void NativeGeneralJump::verify() {  }
 
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
@@ -90,7 +90,6 @@
 
   oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }
 
-  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; }
   void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i; }
   void set_uint_at(int offset, jint  i)       { *(juint*)addr_at(offset) = i; }
 
@@ -290,22 +289,13 @@
 class NativeMovConstReg: public NativeInstruction {
  public:
   enum Aarch64_specific_constants {
-    instruction_size = 3 * 4, // movz, movk, movk.  See movptr().
     instruction_offset = 0,
     displacement_offset = 0,
   };
 
   address instruction_address() const       { return addr_at(instruction_offset); }
   address next_instruction_address() const {
-    if (nativeInstruction_at(instruction_address())->is_movz())
-      // Assume movz, movk, movk
-      return addr_at(instruction_size);
-    else if (is_adrp_at(instruction_address()))
-      return addr_at(2*4);
-    else if (is_ldr_literal_at(instruction_address()))
-      return(addr_at(4));
-    assert(false, "Unknown instruction in NativeMovConstReg");
-    return NULL;
+    return addr_at(get_instruction_size());
   }
 
   intptr_t data() const;
@@ -313,10 +303,12 @@
 
   void  flush() {
     if (! maybe_cpool_ref(instruction_address())) {
-      ICache::invalidate_range(instruction_address(), instruction_size);
+      ICache::invalidate_range(instruction_address(), get_instruction_size());
     }
   }
 
+  int get_instruction_size() const;
+
   void  verify();
   void  print();
 
@@ -325,7 +317,6 @@
 
   // Creation
   inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
-  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
 };
 
 inline NativeMovConstReg* nativeMovConstReg_at(address address) {
@@ -336,14 +327,6 @@
   return test;
 }
 
-inline NativeMovConstReg* nativeMovConstReg_before(address address) {
-  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
-#ifdef ASSERT
-  test->verify();
-#endif
-  return test;
-}
-
 class NativeMovConstRegPatching: public NativeMovConstReg {
  private:
   friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
@@ -508,12 +491,12 @@
 class NativeGeneralJump: public NativeJump {
 public:
   enum AArch64_specific_constants {
-    instruction_size = 4 * 4,
     instruction_offset = 0,
-    data_offset = 0,
-    next_instruction_offset = 4 * 4
+    data_offset = 0
   };
 
+  static int instruction_size;
+
   address jump_destination() const;
   void set_jump_destination(address dest);
 
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -31,6 +31,7 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/macros.hpp"
 #include "vm_version_aarch64.hpp"
+#include "nativeInst_aarch64.hpp"
 
 #include OS_HEADER_INLINE(os)
 
@@ -430,6 +431,19 @@
 #endif
 }
 
+/*
+ * Configure instruction sizes for nativeInst_aarch64 based on Use64BitLiteralAddresses.
+ */
+static void set_instruction_sizes() {
+  if (Use64BitLiteralAddresses) {
+    // movz, movk, movk, movk, br.
+    NativeGeneralJump::instruction_size = 5 * NativeInstruction::instruction_size;
+  } else {
+    // movz, movk, movk, br.
+    NativeGeneralJump::instruction_size = 4 * NativeInstruction::instruction_size;
+  }
+}
+
 void VM_Version::initialize() {
   ResourceMark rm;
 
@@ -445,5 +459,7 @@
 
   get_processor_features();
 
+  set_instruction_sizes();
+
   UNSUPPORTED_OPTION(CriticalJNINatives);
 }
diff --git a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
@@ -241,6 +241,8 @@
 // Call return is directly after patch word
 int PatchingStub::_patch_info_offset = 0;
 
+int PatchingStub::patch_info_offset() { return _patch_info_offset; }
+
 void PatchingStub::align_patch_site(MacroAssembler* masm) {
 #if 0
   // TODO: investigate if we required to implement this
diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
@@ -304,6 +304,8 @@
 
 int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);
 
+int PatchingStub::patch_info_offset() { return _patch_info_offset; }
+
 void PatchingStub::align_patch_site(MacroAssembler* ) {
   // Patch sites on ppc are always properly aligned.
 }
 
diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
@@ -275,6 +275,8 @@
 
 int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);
 
+int PatchingStub::patch_info_offset() { return _patch_info_offset; }
+
 void PatchingStub::align_patch_site(MacroAssembler* masm) {
 #ifndef PRODUCT
   const char* bc;
diff --git a/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp b/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp
--- a/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp
+++ b/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp
@@ -281,6 +281,8 @@
 
 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
 
+int PatchingStub::patch_info_offset() { return _patch_info_offset; }
+
 void PatchingStub::align_patch_site(MacroAssembler* ) {
   // patch sites on sparc are always properly aligned.
 }
diff --git a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
--- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
@@ -283,6 +283,8 @@
 
 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
 
+int PatchingStub::patch_info_offset() { return _patch_info_offset; }
+
 void PatchingStub::align_patch_site(MacroAssembler* masm) {
   // We're patching a 5-7 byte instruction on intel and we need to
   // make sure that we don't see a piece of the instruction.  It
diff --git a/src/hotspot/share/aot/aotLoader.cpp b/src/hotspot/share/aot/aotLoader.cpp
--- a/src/hotspot/share/aot/aotLoader.cpp
+++ b/src/hotspot/share/aot/aotLoader.cpp
@@ -215,6 +215,13 @@
       FLAG_SET_DEFAULT(UseAOT, false);
     }
   }
+
+#if defined(AARCH64)
+  if (Use64BitLiteralAddresses && UseAOT) {
+    warning("UseAOT disabled due to 64-bit Literal Addresses.");
+    FLAG_SET_CMDLINE(bool, UseAOT, false);
+  }
+#endif
 }
 
 // Set shift value for compressed oops and classes based on first AOT library config.
diff --git a/src/hotspot/share/c1/c1_CodeStubs.hpp b/src/hotspot/share/c1/c1_CodeStubs.hpp
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp
@@ -388,7 +388,7 @@
   void align_patch_site(MacroAssembler* masm);
 
  public:
-  static int patch_info_offset() { return _patch_info_offset; }
+  static int patch_info_offset();
 
   PatchingStub(MacroAssembler* masm, PatchID id, int index = -1):
       _id(id)
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -374,6 +374,14 @@
     status = status && JVMCIGlobals::check_jvmci_flags_are_consistent();
 #endif
   }
+
+#if INCLUDE_JVMCI && defined(AARCH64)
+  if (Use64BitLiteralAddresses && EnableJVMCI) {
+    warning("64-bit Literal Addresses disabled due to EnableJVMCI.");
+    FLAG_SET_CMDLINE(bool, Use64BitLiteralAddresses, false);
+  }
+#endif
+
   return status;
 }
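
Not part of the patch: the following standalone sketch (hypothetical names, no HotSpot dependencies) illustrates the 16-bit chunking that MacroAssembler::movptr() performs in the hunks above, i.e. one movz plus two movk instructions for a 48-bit literal address, or a third movk when Use64BitLiteralAddresses is set. The flag parameter and helper below are placeholders for illustration only.

// Standalone sketch, not HotSpot code: models the movz/movk splitting done by
// MacroAssembler::movptr() in this patch.
#include <cstdint>
#include <cstdio>
#include <vector>

struct MovInsn {
  bool     keep;   // false = movz (zeroes other bits), true = movk (keeps them)
  uint16_t imm;    // 16-bit immediate
  int      shift;  // LSL amount: 0, 16, 32 or 48
};

// Split an address into the sequence movptr() would emit: 3 instructions for a
// 48-bit literal, 4 when use_64bit_literals (stand-in for the flag) is true.
static std::vector<MovInsn> split_literal(uint64_t addr, bool use_64bit_literals) {
  std::vector<MovInsn> seq;
  seq.push_back({false, (uint16_t)(addr & 0xffff), 0});           // movz
  seq.push_back({true,  (uint16_t)((addr >> 16) & 0xffff), 16});  // movk #16
  seq.push_back({true,  (uint16_t)((addr >> 32) & 0xffff), 32});  // movk #32
  if (use_64bit_literals) {
    seq.push_back({true, (uint16_t)((addr >> 48) & 0xffff), 48}); // movk #48
  }
  return seq;
}

int main() {
  const uint64_t addr = 0x0000ffff12345678ULL;  // example 48-bit address
  for (const MovInsn& i : split_literal(addr, /*use_64bit_literals=*/false)) {
    std::printf("%s x0, #0x%04x, lsl #%d\n", i.keep ? "movk" : "movz", i.imm, i.shift);
  }
  return 0;
}

This mirrors why the size constants touched by the patch grow by exactly one NativeInstruction::instruction_size per patchable literal when the option is enabled.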