/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 */ 23 24 25 package org.graalvm.compiler.lir.aarch64; 26 27 import static jdk.vm.ci.aarch64.AArch64.lr; 28 import static jdk.vm.ci.code.ValueUtil.asAllocatableValue; 29 import static jdk.vm.ci.code.ValueUtil.asRegister; 30 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT; 31 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL; 32 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG; 33 34 import java.util.function.Function; 35 36 import jdk.vm.ci.meta.AllocatableValue; 37 import org.graalvm.compiler.asm.Label; 38 import org.graalvm.compiler.asm.aarch64.AArch64Assembler; 39 import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ConditionFlag; 40 import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ExtendType; 41 import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler; 42 import org.graalvm.compiler.code.CompilationResult.JumpTable; 43 import org.graalvm.compiler.core.common.LIRKind; 44 import org.graalvm.compiler.core.common.NumUtil; 45 import org.graalvm.compiler.core.common.calc.Condition; 46 import org.graalvm.compiler.debug.GraalError; 47 import org.graalvm.compiler.lir.ConstantValue; 48 import org.graalvm.compiler.lir.LIRInstruction; 49 import org.graalvm.compiler.lir.LIRInstructionClass; 50 import org.graalvm.compiler.lir.LabelRef; 51 import org.graalvm.compiler.lir.Opcode; 52 import org.graalvm.compiler.lir.StandardOp; 53 import org.graalvm.compiler.lir.StandardOp.BlockEndOp; 54 import org.graalvm.compiler.lir.SwitchStrategy; 55 import org.graalvm.compiler.lir.SwitchStrategy.BaseSwitchClosure; 56 import org.graalvm.compiler.lir.Variable; 57 import org.graalvm.compiler.lir.asm.CompilationResultBuilder; 58 59 import jdk.vm.ci.aarch64.AArch64Kind; 60 import jdk.vm.ci.code.Register; 61 import jdk.vm.ci.meta.Constant; 62 import jdk.vm.ci.meta.JavaConstant; 63 import jdk.vm.ci.meta.Value; 64 65 public class AArch64ControlFlow { 66 public static final class ReturnOp extends AArch64BlockEndOp 
implements BlockEndOp { 67 public static final LIRInstructionClass<ReturnOp> TYPE = LIRInstructionClass.create(ReturnOp.class); 68 @Use({REG, ILLEGAL}) protected Value x; 69 70 public ReturnOp(Value x) { 71 super(TYPE); 72 this.x = x; 73 } 74 75 @Override 76 protected void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 77 crb.frameContext.leave(crb); 78 masm.ret(lr); 79 } 80 } 81 82 public abstract static class AbstractBranchOp extends AArch64BlockEndOp implements StandardOp.BranchOp { 83 private final LabelRef trueDestination; 84 private final LabelRef falseDestination; 85 86 private final double trueDestinationProbability; 87 88 private AbstractBranchOp(LIRInstructionClass<? extends AbstractBranchOp> c, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) { 89 super(c); 90 this.trueDestination = trueDestination; 91 this.falseDestination = falseDestination; 92 this.trueDestinationProbability = trueDestinationProbability; 93 } 94 95 protected abstract void emitBranch(CompilationResultBuilder crb, AArch64MacroAssembler masm, LabelRef target, boolean negate); 96 97 @Override 98 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 99 /* 100 * Explanation: Depending on what the successor edge is, we can use the fall-through to 101 * optimize the generated code. If neither is a successor edge, use the branch 102 * probability to try to take the conditional jump as often as possible to avoid 103 * executing two instructions instead of one. 
104 */ 105 if (crb.isSuccessorEdge(trueDestination)) { 106 emitBranch(crb, masm, falseDestination, true); 107 } else if (crb.isSuccessorEdge(falseDestination)) { 108 emitBranch(crb, masm, trueDestination, false); 109 } else if (trueDestinationProbability < 0.5) { 110 emitBranch(crb, masm, falseDestination, true); 111 masm.jmp(trueDestination.label()); 112 } else { 113 emitBranch(crb, masm, trueDestination, false); 114 masm.jmp(falseDestination.label()); 115 } 116 } 117 } 118 119 public static class BranchOp extends AbstractBranchOp implements StandardOp.BranchOp { 120 public static final LIRInstructionClass<BranchOp> TYPE = LIRInstructionClass.create(BranchOp.class); 121 122 private final AArch64Assembler.ConditionFlag condition; 123 124 public BranchOp(AArch64Assembler.ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) { 125 super(TYPE, trueDestination, falseDestination, trueDestinationProbability); 126 this.condition = condition; 127 } 128 129 @Override 130 protected void emitBranch(CompilationResultBuilder crb, AArch64MacroAssembler masm, LabelRef target, boolean negate) { 131 AArch64Assembler.ConditionFlag finalCond = negate ? 
condition.negate() : condition; 132 masm.branchConditionally(finalCond, target.label()); 133 } 134 } 135 136 public static class CompareBranchZeroOp extends AbstractBranchOp implements StandardOp.BranchOp { 137 public static final LIRInstructionClass<CompareBranchZeroOp> TYPE = LIRInstructionClass.create(CompareBranchZeroOp.class); 138 139 @Use(REG) private AllocatableValue value; 140 141 public CompareBranchZeroOp(AllocatableValue value, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) { 142 super(TYPE, trueDestination, falseDestination, trueDestinationProbability); 143 this.value = value; 144 } 145 146 @Override 147 protected void emitBranch(CompilationResultBuilder crb, AArch64MacroAssembler masm, LabelRef target, boolean negate) { 148 AArch64Kind kind = (AArch64Kind) this.value.getPlatformKind(); 149 assert kind.isInteger(); 150 int size = kind.getSizeInBytes() * Byte.SIZE; 151 152 Label label = target.label(); 153 boolean isFarBranch = isFarBranch(this, 21, crb, masm, label); 154 boolean useCbnz; 155 if (isFarBranch) { 156 useCbnz = !negate; 157 label = new Label(); 158 } else { 159 useCbnz = negate; 160 } 161 162 if (useCbnz) { 163 masm.cbnz(size, asRegister(this.value), label); 164 } else { 165 masm.cbz(size, asRegister(this.value), label); 166 } 167 168 if (isFarBranch) { 169 masm.jmp(target.label()); 170 masm.bind(label); 171 } 172 } 173 } 174 175 public static class BitTestAndBranchOp extends AbstractBranchOp implements StandardOp.BranchOp { 176 public static final LIRInstructionClass<BitTestAndBranchOp> TYPE = LIRInstructionClass.create(BitTestAndBranchOp.class); 177 178 @Use protected AllocatableValue value; 179 private final int index; 180 181 public BitTestAndBranchOp(LabelRef trueDestination, LabelRef falseDestination, AllocatableValue value, double trueDestinationProbability, int index) { 182 super(TYPE, trueDestination, falseDestination, trueDestinationProbability); 183 this.value = value; 184 this.index = 
index; 185 } 186 187 @Override 188 protected void emitBranch(CompilationResultBuilder crb, AArch64MacroAssembler masm, LabelRef target, boolean negate) { 189 ConditionFlag cond = negate ? ConditionFlag.NE : ConditionFlag.EQ; 190 Label label = target.label(); 191 boolean isFarBranch = isFarBranch(this, 14, crb, masm, label); 192 193 if (isFarBranch) { 194 cond = cond.negate(); 195 label = new Label(); 196 } 197 198 if (cond == ConditionFlag.EQ) { 199 masm.tbz(asRegister(value), index, label); 200 } else { 201 masm.tbnz(asRegister(value), index, label); 202 } 203 204 if (isFarBranch) { 205 masm.jmp(target.label()); 206 masm.bind(label); 207 } 208 } 209 } 210 211 @Opcode("CMOVE") 212 public static class CondMoveOp extends AArch64LIRInstruction { 213 public static final LIRInstructionClass<CondMoveOp> TYPE = LIRInstructionClass.create(CondMoveOp.class); 214 215 @Def protected Value result; 216 @Use protected Value trueValue; 217 @Use protected Value falseValue; 218 private final AArch64Assembler.ConditionFlag condition; 219 220 public CondMoveOp(Variable result, AArch64Assembler.ConditionFlag condition, Value trueValue, Value falseValue) { 221 super(TYPE); 222 assert trueValue.getPlatformKind() == falseValue.getPlatformKind() && trueValue.getPlatformKind() == result.getPlatformKind(); 223 this.result = result; 224 this.condition = condition; 225 this.trueValue = trueValue; 226 this.falseValue = falseValue; 227 } 228 229 @Override 230 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 231 AArch64Kind kind = (AArch64Kind) trueValue.getPlatformKind(); 232 int size = kind.getSizeInBytes() * Byte.SIZE; 233 if (kind.isInteger()) { 234 masm.cmov(size, asRegister(result), asRegister(trueValue), asRegister(falseValue), condition); 235 } else { 236 masm.fcmov(size, asRegister(result), asRegister(trueValue), asRegister(falseValue), condition); 237 } 238 } 239 } 240 241 public static class CondSetOp extends AArch64LIRInstruction { 242 public static 
final LIRInstructionClass<CondSetOp> TYPE = LIRInstructionClass.create(CondSetOp.class); 243 244 @Def protected Value result; 245 private final AArch64Assembler.ConditionFlag condition; 246 247 public CondSetOp(Variable result, AArch64Assembler.ConditionFlag condition) { 248 super(TYPE); 249 this.result = result; 250 this.condition = condition; 251 } 252 253 @Override 254 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 255 int size = result.getPlatformKind().getSizeInBytes() * Byte.SIZE; 256 masm.cset(size, asRegister(result), condition); 257 } 258 } 259 260 public static class StrategySwitchOp extends AArch64BlockEndOp implements StandardOp.BlockEndOp { 261 public static final LIRInstructionClass<StrategySwitchOp> TYPE = LIRInstructionClass.create(StrategySwitchOp.class); 262 263 private final Constant[] keyConstants; 264 protected final SwitchStrategy strategy; 265 private final Function<Condition, ConditionFlag> converter; 266 private final LabelRef[] keyTargets; 267 private final LabelRef defaultTarget; 268 @Alive protected Value key; 269 // TODO (das) This could be optimized: We only need the scratch register in case of a 270 // datapatch, or too large immediates. 271 @Temp protected Value scratch; 272 273 public StrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch, 274 Function<Condition, ConditionFlag> converter) { 275 this(TYPE, strategy, keyTargets, defaultTarget, key, scratch, converter); 276 } 277 278 protected StrategySwitchOp(LIRInstructionClass<? 
extends StrategySwitchOp> c, SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch, 279 Function<Condition, ConditionFlag> converter) { 280 super(c); 281 this.strategy = strategy; 282 this.converter = converter; 283 this.keyConstants = strategy.getKeyConstants(); 284 this.keyTargets = keyTargets; 285 this.defaultTarget = defaultTarget; 286 this.key = key; 287 this.scratch = scratch; 288 assert keyConstants.length == keyTargets.length; 289 assert keyConstants.length == strategy.keyProbabilities.length; 290 } 291 292 @Override 293 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 294 strategy.run(new SwitchClosure(asRegister(key), crb, masm)); 295 } 296 297 public class SwitchClosure extends BaseSwitchClosure { 298 299 protected final Register keyRegister; 300 protected final CompilationResultBuilder crb; 301 protected final AArch64MacroAssembler masm; 302 303 protected SwitchClosure(Register keyRegister, CompilationResultBuilder crb, AArch64MacroAssembler masm) { 304 super(crb, masm, keyTargets, defaultTarget); 305 this.keyRegister = keyRegister; 306 this.crb = crb; 307 this.masm = masm; 308 } 309 310 protected void emitComparison(Constant c) { 311 JavaConstant jc = (JavaConstant) c; 312 ConstantValue constVal = new ConstantValue(LIRKind.value(key.getPlatformKind()), c); 313 switch (jc.getJavaKind()) { 314 case Int: 315 long lc = jc.asLong(); 316 assert NumUtil.isInt(lc); 317 emitCompare(crb, masm, key, scratch, constVal); 318 break; 319 case Long: 320 emitCompare(crb, masm, key, scratch, constVal); 321 break; 322 case Object: 323 emitCompare(crb, masm, key, scratch, constVal); 324 break; 325 default: 326 throw new GraalError("switch only supported for int, long and object"); 327 } 328 } 329 330 @Override 331 protected void conditionalJump(int index, Condition condition, Label target) { 332 emitComparison(keyConstants[index]); 333 masm.branchConditionally(converter.apply(condition), target); 
334 } 335 } 336 } 337 338 public static final class TableSwitchOp extends AArch64BlockEndOp { 339 public static final LIRInstructionClass<TableSwitchOp> TYPE = LIRInstructionClass.create(TableSwitchOp.class); 340 private final int lowKey; 341 private final LabelRef defaultTarget; 342 private final LabelRef[] targets; 343 @Use protected Value index; 344 @Temp({REG, HINT}) protected Value idxScratch; 345 @Temp protected Value scratch; 346 347 public TableSwitchOp(final int lowKey, final LabelRef defaultTarget, final LabelRef[] targets, Value index, Variable scratch, Variable idxScratch) { 348 super(TYPE); 349 this.lowKey = lowKey; 350 this.defaultTarget = defaultTarget; 351 this.targets = targets; 352 this.index = index; 353 this.scratch = scratch; 354 this.idxScratch = idxScratch; 355 } 356 357 @Override 358 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 359 Register indexReg = asRegister(index, AArch64Kind.DWORD); 360 Register idxScratchReg = asRegister(idxScratch, AArch64Kind.DWORD); 361 Register scratchReg = asRegister(scratch, AArch64Kind.QWORD); 362 363 // Compare index against jump table bounds 364 int highKey = lowKey + targets.length - 1; 365 masm.sub(32, idxScratchReg, indexReg, lowKey); 366 masm.cmp(32, idxScratchReg, highKey - lowKey); 367 368 // Jump to default target if index is not within the jump table 369 if (defaultTarget != null) { 370 masm.branchConditionally(ConditionFlag.HI, defaultTarget.label()); 371 } 372 373 Label jumpTable = new Label(); 374 masm.adr(scratchReg, jumpTable); 375 masm.add(64, scratchReg, scratchReg, idxScratchReg, ExtendType.UXTW, 2); 376 masm.jmp(scratchReg); 377 masm.bind(jumpTable); 378 // emit jump table entries 379 for (LabelRef target : targets) { 380 masm.jmp(target.label()); 381 } 382 JumpTable jt = new JumpTable(jumpTable.position(), lowKey, highKey - 1, 4); 383 crb.compilationResult.addAnnotation(jt); 384 } 385 } 386 387 private static void emitCompare(CompilationResultBuilder crb, 
AArch64MacroAssembler masm, Value key, Value scratchValue, ConstantValue c) { 388 long imm = c.getJavaConstant().asLong(); 389 final int size = key.getPlatformKind().getSizeInBytes() * Byte.SIZE; 390 if (AArch64MacroAssembler.isComparisonImmediate(imm)) { 391 masm.cmp(size, asRegister(key), (int) imm); 392 } else { 393 AArch64Move.move(crb, masm, asAllocatableValue(scratchValue), c); 394 masm.cmp(size, asRegister(key), asRegister(scratchValue)); 395 } 396 } 397 398 private static boolean isFarBranch(LIRInstruction instruction, int offsetBits, CompilationResultBuilder crb, AArch64MacroAssembler masm, Label label) { 399 boolean isFarBranch; 400 if (label.isBound()) { 401 // The label.position() is a byte based index. The instruction instruction has 402 // offsetBits bits for the offset and AArch64 instruction is 4 bytes aligned. So 403 // instruction can encode offsetBits+2 bits signed offset. 404 isFarBranch = !NumUtil.isSignedNbit(offsetBits + 2, masm.position() - label.position()); 405 } else { 406 // Max range of instruction is 2^offsetBits instructions. We estimate that each LIR 407 // instruction emits 2 AArch64 instructions on average. Thus we test for maximum 408 // 2^(offsetBits-2) LIR instruction offset. 409 int maxLIRDistance = (1 << (offsetBits - 2)); 410 isFarBranch = !crb.labelWithinRange(instruction, label, maxLIRDistance); 411 } 412 return isFarBranch; 413 } 414 }