src/share/vm/c1/c1_LinearScan.cpp
@@ -33,10 +33,13 @@
#include "c1/c1_ValueStack.hpp"
#include "utilities/bitMap.inline.hpp"
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
+#ifdef TARGET_ARCH_aarch64
+# include "vmreg_aarch64.inline.hpp"
+#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.inline.hpp"
@@ -1091,11 +1094,11 @@
      return shouldHaveRegister;
    }
  }

-#ifdef X86
+#if defined(X86)
  if (op->code() == lir_cmove) {
    // conditional moves can handle stack operands
    assert(op->result_opr()->is_register(), "result must always be in a register");
    return shouldHaveRegister;
  }
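For context, this code decides whether an input operand must be forced into a register: on x86 a conditional move can consume a stack slot directly, so its inputs only *should* have a register rather than *must*. A self-contained sketch of that decision shape (the enum and opcode names are simplified stand-ins for the real LIR types, not HotSpot's API):

    // Simplified stand-in for the use-kind decision above; the real
    // code inspects LIR_Op / LIR_Opr. Names here are illustrative only.
    enum UseKind { noUse, shouldHaveRegister, mustHaveRegister };
    enum OpCode  { lir_add, lir_cmove, lir_call };

    static UseKind use_kind_of_input(OpCode code, bool platform_has_mem_cmove) {
      if (code == lir_cmove && platform_has_mem_cmove) {
        // a conditional move can read a stack operand, so a register
        // is preferred but not required
        return shouldHaveRegister;
      }
      // most operands must be materialized in a register
      return mustHaveRegister;
    }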
@@ -2193,11 +2196,11 @@
    interval = split_child_at_op_id(interval, op_id, mode);
  }

  LIR_Opr res = operand_for_interval(interval);

-#ifdef X86
+#if defined(X86) || defined(AARCH64)
  // new semantic for is_last_use: not only set on the definite end of an
  // interval, but also before a hole
  // This may still miss some cases (e.g. for dead values), but it is not
  // necessary that the last-use information is completely correct: it is
  // only needed for fpu stack allocation
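To unpack the comment above: an interval can have lifetime holes (gaps where the value is not live), and the FPU stack allocator needs to pop a value when it dies, so a use just before a hole is also flagged as "last". An illustrative sketch of that relaxed test (Range and std::vector stand in for HotSpot's internal range list; this is not the real implementation):

    // Illustrative sketch of the relaxed is_last_use semantics: a use
    // is "last" at the interval's real end *or* just before a hole.
    #include <vector>

    struct Range { int from; int to; };  // half-open [from, to)

    static bool is_last_use_at(const std::vector<Range>& ranges, int op_id) {
      for (size_t i = 0; i < ranges.size(); i++) {
        if (op_id == ranges[i].to - 1) {
          // either the definite end of the interval (last range) or the
          // last use before a hole (gap until the next range begins)
          return true;
        }
      }
      return false;
    }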
@@ -4536,11 +4539,13 @@
#ifdef X86
    } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
      opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
#endif
    } else {
+#if !defined(AARCH64)
      ShouldNotReachHere();
+#endif
    }
  } else {
    type_name = type2name(type());
    if (assigned_reg() != -1 &&
        (LinearScan::num_physical_regs(type()) == 1 || assigned_regHi() != -1)) {
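The printing code above maps a raw assigned register index back to a typed operand by numeric range: CPU registers first, then FPU registers, then (x86 only) XMM registers; any other index is treated as a bug, except that the AArch64 change tolerates it here. A sketch of that range-based classification (the pd_* bounds are hypothetical example values, not the real platform definitions):

    // Sketch of the range classification above; bounds are made up.
    enum RegClass { cpu_reg, fpu_reg, xmm_reg, bad_reg };

    static const int pd_first_fpu_reg = 16, pd_last_fpu_reg = 31;
    static const int pd_first_xmm_reg = 32, pd_last_xmm_reg = 47;

    static RegClass classify(int assigned_reg) {
      if (assigned_reg >= 0 && assigned_reg < pd_first_fpu_reg) {
        return cpu_reg;
      }
      if (assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg) {
        return fpu_reg;
      }
      if (assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg) {
        return xmm_reg;
      }
      return bad_reg;  // the hunk above only treats this as fatal off AArch64
    }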
@@ -5610,11 +5615,11 @@
    split_and_spill_intersecting_intervals(reg, regHi);
  }
}

bool LinearScanWalker::no_allocation_possible(Interval* cur) {
-#ifdef X86
+#if defined(X86)
  // fast calculation of intervals that can never get a register because
  // the next instruction is a call that blocks all registers
  // Note: this does not work if callee-saved registers are available (e.g. on Sparc)

  // check if this interval is the result of a split operation
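The fast path above rests on two facts: an interval that begins at an odd position was created by splitting another interval exactly at an instruction boundary, and if the instruction right after that boundary is a call that clobbers every allocatable register, no register assignment can succeed, so the walker can spill immediately. The shortcut is unsound where calls leave callee-saved registers usable (e.g. SPARC). A simplified sketch of the test, with the inputs reduced to plain flags (the real walker inspects Interval positions and the allocator's call information):

    // Simplified sketch of the fast path described above.
    static bool no_allocation_possible(int interval_from,
                                       bool next_op_is_blocking_call,
                                       bool callee_saved_available) {
      if (callee_saved_available) {
        // a call does not block callee-saved registers, so the
        // shortcut does not apply (e.g. on SPARC)
        return false;
      }
      // an interval starting at an odd position came from a split; if
      // the instruction right after it blocks all registers, every
      // candidate register is busy at the interval's first use
      return (interval_from & 1) == 1 && next_op_is_blocking_call;
    }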