< prev index next >
src/hotspot/cpu/aarch64/assembler_aarch64.hpp
Print this page
rev 60626 : 8248663: AArch64: Avoid existing macros/keywords of MSVC
Reviewed-by:
Contributed-by: mbeckwit, luhenry, burban
rev 60629 : 8248656: Add Windows AArch64 platform support code
Reviewed-by:
Contributed-by: mbeckwit, luhenry, burban
rev 60631 : 8248660: AArch64: Make _clear_cache and _nop portable
Reviewed-by:
Contributed-by: mbeckwit, luhenry, burban
@@ -26,10 +26,24 @@
#ifndef CPU_AARCH64_ASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_ASSEMBLER_AARCH64_HPP
#include "asm/register.hpp"
+#ifdef __GNUC__
+
+// NOP() emits a single no-op instruction; the asm must be volatile so
+// the compiler doesn't optimize it away
+#define NOP() asm volatile ("nop");
+
+#elif defined(_MSC_VER)
+
+// Use MSVC intrinsic: https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019#I
+#define NOP() __nop();
+
+#endif
+
+
+
// definitions of various symbolic names for machine registers
// First intercalls between C and Java which use 8 general registers
// and 8 floating registers
@@ -611,11 +625,11 @@
#ifndef PRODUCT
// Debugging aid: asm_bp holds a code address of interest; when code
// emission reaches it a nop is executed, so a debugger breakpoint can be
// planted there (NOTE(review): inferred from the name and usage — confirm)
static const uintptr_t asm_bp;
void emit_long(jint x) {
if ((uintptr_t)pc() == asm_bp)
- asm volatile ("nop");
+ NOP();
AbstractAssembler::emit_int32(x);
}
#else
void emit_long(jint x) {
AbstractAssembler::emit_int32(x);
@@ -651,10 +665,12 @@
// Build a post-indexed Address: the access uses base, which is then
// updated by idx (see the Post() addressing-mode helper)
Address post(Register base, Register idx) {
return Address(Post(base, idx));
}
+ static address locate_next_instruction(address inst); // presumably returns the address just past the instruction at inst — TODO confirm against the .cpp
+
// The Instruction_aarch64 currently being assembled; set via set_current()
Instruction_aarch64* current;
void set_current(Instruction_aarch64* i) { current = i; }
void f(unsigned val, int msb, int lsb) {
@@ -1520,10 +1536,15 @@
INSN(eonw, 0, 0b10, 1);
INSN(bicsw, 0, 0b11, 1);
#undef INSN
+#ifdef _WIN64
+// In MSVC, `mvn` is defined as a macro and it affects compilation;
+// undefine it so the mvn() alias below can be declared
+#undef mvn
+#endif
+
// Aliases for short forms of orn: mvn Rd, Rm{, shift} is a bitwise NOT
// of the (optionally shifted) Rm, encoded as orn with the zero register
void mvn(Register Rd, Register Rm,
enum shift_kind kind = LSL, unsigned shift = 0) {
orn(Rd, zr, Rm, kind, shift);
}
< prev index next >