/*
 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package org.graalvm.compiler.lir.amd64;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;

import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.code.CompilationResult.JumpTable;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.StandardOp;
import org.graalvm.compiler.lir.StandardOp.BlockEndOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.SwitchStrategy.BaseSwitchClosure;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.Value;

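/**
 * AMD64 LIR instructions for control flow: returns, conditional and unconditional branches,
 * compare-and-branch based switches, jump-table switches and conditional moves.
 */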
public class AMD64ControlFlow {

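    /**
     * Pops the frame and returns to the caller. The return value {@code x}
     * ({@link Value#ILLEGAL} for methods without a return value) is an input only so that the
     * register allocator keeps it alive in the ABI return register across the epilogue.
     */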
    public static final class ReturnOp extends AMD64BlockEndOp implements BlockEndOp {
        public static final LIRInstructionClass<ReturnOp> TYPE = LIRInstructionClass.create(ReturnOp.class);
        @Use({REG, ILLEGAL}) protected Value x;

        public ReturnOp(Value x) {
            super(TYPE);
            this.x = x;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            crb.frameContext.leave(crb);
            /*
             * We potentially return to the interpreter, and that's an AVX-SSE transition. The only
             * live value at this point should be the return value in either rax, or in xmm0 with
             * the upper half of the register unused, so we don't destroy any value here.
             */
            if (masm.supports(CPUFeature.AVX)) {
                masm.vzeroupper();
            }
            masm.ret(0);
        }
    }

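    /**
     * Conditional branch with a true and a false successor. {@link #emitCode} uses the block
     * order and {@code trueDestinationProbability} to decide which successor is reached via the
     * conditional jump, and whether a separate unconditional jump is needed at all.
     */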
    public static class BranchOp extends AMD64BlockEndOp implements StandardOp.BranchOp {
        public static final LIRInstructionClass<BranchOp> TYPE = LIRInstructionClass.create(BranchOp.class);
        protected final ConditionFlag condition;
        protected final LabelRef trueDestination;
        protected final LabelRef falseDestination;

        private final double trueDestinationProbability;

        public BranchOp(Condition condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(intCond(condition), trueDestination, falseDestination, trueDestinationProbability);
        }

        public BranchOp(ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(TYPE, condition, trueDestination, falseDestination, trueDestinationProbability);
        }

        protected BranchOp(LIRInstructionClass<? extends BranchOp> c, ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(c);
            this.condition = condition;
            this.trueDestination = trueDestination;
            this.falseDestination = falseDestination;
            this.trueDestinationProbability = trueDestinationProbability;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            boolean isNegated = false;
            int jccPos = masm.position();
            /*
             * The strategy for emitting jumps is: If either trueDestination or falseDestination is
             * the successor block, assume the block scheduler did the correct thing and jcc to the
             * other. Otherwise, we need a jcc followed by a jmp. Use the branch probability to make
             * sure it is more likely to branch on the jcc (= less likely to execute both the jcc
             * and the jmp instead of just the jcc). In the case of loops, that means the jcc is the
             * back-edge.
             */
            if (crb.isSuccessorEdge(trueDestination)) {
                jcc(masm, true, falseDestination);
                isNegated = true;
            } else if (crb.isSuccessorEdge(falseDestination)) {
                jcc(masm, false, trueDestination);
            } else if (trueDestinationProbability < 0.5) {
                jcc(masm, true, falseDestination);
                masm.jmp(trueDestination.label());
                isNegated = true;
            } else {
                jcc(masm, false, trueDestination);
                masm.jmp(falseDestination.label());
            }
            crb.recordBranch(jccPos, isNegated);
        }

        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            masm.jcc(negate ? condition.negate() : condition, target.label());
        }
    }

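    /**
     * Conditional branch on a floating-point comparison. Unlike integer comparisons, a float
     * comparison can be unordered (at least one operand is NaN), which ucomiss/ucomisd signal via
     * the parity flag; {@code unorderedIsTrue} selects which destination is taken in that case.
     */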
    public static final class FloatBranchOp extends BranchOp {
        public static final LIRInstructionClass<FloatBranchOp> TYPE = LIRInstructionClass.create(FloatBranchOp.class);
        protected boolean unorderedIsTrue;

        public FloatBranchOp(Condition condition, boolean unorderedIsTrue, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(TYPE, floatCond(condition), trueDestination, falseDestination, trueDestinationProbability);
            this.unorderedIsTrue = unorderedIsTrue;
        }

        @Override
        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            floatJcc(masm, negate ? condition.negate() : condition, negate ? !unorderedIsTrue : unorderedIsTrue, target.label());
        }
    }

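    /**
     * Switch emitted as the sequence of compares and conditional branches chosen by the given
     * {@link SwitchStrategy} (e.g. sequential tests or a binary search over the keys). The
     * scratch register is only needed for object keys, which must be materialized into a register
     * before the compare; for int and long keys it can be {@link Value#ILLEGAL}.
     */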
    public static class StrategySwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<StrategySwitchOp> TYPE = LIRInstructionClass.create(StrategySwitchOp.class);
        protected final Constant[] keyConstants;
        private final LabelRef[] keyTargets;
        private LabelRef defaultTarget;
        @Alive({REG}) protected Value key;
        @Temp({REG, ILLEGAL}) protected Value scratch;
        protected final SwitchStrategy strategy;

        public StrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            this(TYPE, strategy, keyTargets, defaultTarget, key, scratch);
        }

        protected StrategySwitchOp(LIRInstructionClass<? extends StrategySwitchOp> c, SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            super(c);
            this.strategy = strategy;
            this.keyConstants = strategy.getKeyConstants();
            this.keyTargets = keyTargets;
            this.defaultTarget = defaultTarget;
            this.key = key;
            this.scratch = scratch;
            assert keyConstants.length == keyTargets.length;
            assert keyConstants.length == strategy.keyProbabilities.length;
        }

        @Override
        public void emitCode(final CompilationResultBuilder crb, final AMD64MacroAssembler masm) {
            strategy.run(new SwitchClosure(asRegister(key), crb, masm));
        }

        public class SwitchClosure extends BaseSwitchClosure {

            protected final Register keyRegister;
            protected final CompilationResultBuilder crb;
            protected final AMD64MacroAssembler masm;

            protected SwitchClosure(Register keyRegister, CompilationResultBuilder crb, AMD64MacroAssembler masm) {
                super(crb, masm, keyTargets, defaultTarget);
                this.keyRegister = keyRegister;
                this.crb = crb;
                this.masm = masm;
            }

            protected void emitComparison(Constant c) {
                JavaConstant jc = (JavaConstant) c;
                switch (jc.getJavaKind()) {
                    case Int:
                        long lc = jc.asLong();
                        assert NumUtil.isInt(lc);
                        masm.cmpl(keyRegister, (int) lc);
                        break;
                    case Long:
                        masm.cmpq(keyRegister, (AMD64Address) crb.asLongConstRef(jc));
                        break;
                    case Object:
                        AMD64Move.const2reg(crb, masm, asRegister(scratch), jc);
                        masm.cmpptr(keyRegister, asRegister(scratch));
                        break;
                    default:
                        throw new GraalError("switch only supported for int, long and object");
                }
            }

            @Override
            protected void conditionalJump(int index, Condition condition, Label target) {
                emitComparison(keyConstants[index]);
                masm.jcc(intCond(condition), target);
            }
        }
    }

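    /**
     * Switch emitted as an indirect jump through a table of 32-bit offsets relative to the start
     * of the table. The emitted code is equivalent to:
     *
     * <pre>
     *   movl   idxScratch, index          // only if index and idxScratch differ
     *   subl   idxScratch, lowKey         // only if lowKey != 0
     *   cmpl   idxScratch, highKey - lowKey
     *   ja     defaultTarget              // unsigned: also catches index < lowKey
     *   leaq   scratch, [rip + disp]      // disp is patched below to address the table
     *   movslq idxScratch, [scratch + idxScratch * 4]
     *   addq   scratch, idxScratch
     *   jmp    scratch
     * </pre>
     */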
    public static final class TableSwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<TableSwitchOp> TYPE = LIRInstructionClass.create(TableSwitchOp.class);
        private final int lowKey;
        private final LabelRef defaultTarget;
        private final LabelRef[] targets;
        @Use protected Value index;
        @Temp({REG, HINT}) protected Value idxScratch;
        @Temp protected Value scratch;

        public TableSwitchOp(final int lowKey, final LabelRef defaultTarget, final LabelRef[] targets, Value index, Variable scratch, Variable idxScratch) {
            super(TYPE);
            this.lowKey = lowKey;
            this.defaultTarget = defaultTarget;
            this.targets = targets;
            this.index = index;
            this.scratch = scratch;
            this.idxScratch = idxScratch;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register indexReg = asRegister(index, AMD64Kind.DWORD);
            Register idxScratchReg = asRegister(idxScratch, AMD64Kind.DWORD);
            Register scratchReg = asRegister(scratch, AMD64Kind.QWORD);

            if (!indexReg.equals(idxScratchReg)) {
                masm.movl(idxScratchReg, indexReg);
            }

            // Compare index against jump table bounds
            int highKey = lowKey + targets.length - 1;
            if (lowKey != 0) {
                // subtract the low value from the switch value
                masm.subl(idxScratchReg, lowKey);
                masm.cmpl(idxScratchReg, highKey - lowKey);
            } else {
                masm.cmpl(idxScratchReg, highKey);
            }

            // Jump to default target if index is not within the jump table
            if (defaultTarget != null) {
                masm.jcc(ConditionFlag.Above, defaultTarget.label());
            }

            // Set scratch to address of jump table
            masm.leaq(scratchReg, new AMD64Address(AMD64.rip, 0));
            final int afterLea = masm.position();

            // Load jump table entry into scratch and jump to it
            masm.movslq(idxScratchReg, new AMD64Address(scratchReg, idxScratchReg, Scale.Times4, 0));
            masm.addq(scratchReg, idxScratchReg);
            masm.jmp(scratchReg);

            // Insert padding so that the jump table address is 4-byte aligned
            if ((masm.position() & 0x3) != 0) {
                masm.nop(4 - (masm.position() & 0x3));
            }

            // Patch LEA instruction above now that we know the position of the jump table
            // TODO this is ugly and should be done differently
            final int jumpTablePos = masm.position();
            final int leaDisplacementPosition = afterLea - 4;
            masm.emitInt(jumpTablePos - afterLea, leaDisplacementPosition);

            // Emit jump table entries
            for (LabelRef target : targets) {
                Label label = target.label();
                int offsetToJumpTableBase = masm.position() - jumpTablePos;
                if (label.isBound()) {
                    int imm32 = label.position() - jumpTablePos;
                    masm.emitInt(imm32);
                } else {
                    label.addPatchAt(masm.position());

                    masm.emitByte(0); // pseudo-opcode for jump table entry
                    masm.emitShort(offsetToJumpTableBase);
                    masm.emitByte(0); // padding to make jump table entry 4 bytes wide
                }
            }

            JumpTable jt = new JumpTable(jumpTablePos, lowKey, highKey, 4);
            crb.compilationResult.addAnnotation(jt);
        }
    }

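    /**
     * Conditional move {@code result = condition ? trueValue : falseValue}. The false value is
     * moved into the result first and the true value is then CMOVed over it, which is why
     * {@code trueValue} is {@code @Alive}: it must not be allocated to the result register.
     */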
    @Opcode("CMOVE")
    public static final class CondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CondMoveOp> TYPE = LIRInstructionClass.create(CondMoveOp.class);
        @Def({REG, HINT}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Use({REG, STACK, CONST}) protected Value falseValue;
        private final ConditionFlag condition;

        public CondMoveOp(Variable result, Condition condition, AllocatableValue trueValue, Value falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = intCond(condition);
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, false, condition, false, trueValue, falseValue);
        }
    }

    @Opcode("CMOVE")
    public static final class FloatCondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<FloatCondMoveOp> TYPE = LIRInstructionClass.create(FloatCondMoveOp.class);
        @Def({REG}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Alive({REG}) protected Value falseValue;
        private final ConditionFlag condition;
        private final boolean unorderedIsTrue;

        public FloatCondMoveOp(Variable result, Condition condition, boolean unorderedIsTrue, Variable trueValue, Variable falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = floatCond(condition);
            this.unorderedIsTrue = unorderedIsTrue;
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, true, condition, unorderedIsTrue, trueValue, falseValue);
        }
    }

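    /**
     * Emits a conditional jump for a float comparison. When the given condition flag would take
     * the wrong direction in the unordered case (signalled by the parity flag), an extra jump on
     * parity either takes the branch directly or skips the conditional jump.
     */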
    private static void floatJcc(AMD64MacroAssembler masm, ConditionFlag condition, boolean unorderedIsTrue, Label label) {
        Label endLabel = new Label();
        if (unorderedIsTrue && !trueOnUnordered(condition)) {
            masm.jcc(ConditionFlag.Parity, label);
        } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
            masm.jccb(ConditionFlag.Parity, endLabel);
        }
        masm.jcc(condition, label);
        masm.bind(endLabel);
    }

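    /**
     * Common code for {@link CondMoveOp} and {@link FloatCondMoveOp}: move the false value
     * unconditionally, then conditionally overwrite it with the true value. For float conditions,
     * an additional CMOV on the parity flag corrects the result whenever the primary condition
     * flag would pick the wrong value in the unordered (NaN) case.
     */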
    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, boolean isFloat, ConditionFlag condition, boolean unorderedIsTrue, Value trueValue,
                    Value falseValue) {
        // check that we don't overwrite an input operand before it is used.
        assert !result.equals(trueValue);

        AMD64Move.move(crb, masm, result, falseValue);
        cmove(crb, masm, result, condition, trueValue);

        if (isFloat) {
            if (unorderedIsTrue && !trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, trueValue);
            } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, falseValue);
            }
        }
    }

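    /**
     * Emits a single CMOV of {@code other} (a register or an address, e.g. a stack slot) into
     * {@code result}, using the 32-bit or 64-bit form depending on the platform kind of
     * {@code other}.
     */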
    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, ConditionFlag cond, Value other) {
        if (isRegister(other)) {
            assert !asRegister(other).equals(asRegister(result)) : "other already overwritten by previous move";
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), asRegister(other));
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), asRegister(other));
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        } else {
            AMD64Address addr = (AMD64Address) crb.asAddress(other);
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), addr);
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), addr);
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }

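    /**
     * Maps a Graal {@link Condition} to the AMD64 condition flag for an integer comparison:
     * LT/LE/GE/GT are the signed conditions, BT/BE/AE/AT the unsigned ones.
     */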
    private static ConditionFlag intCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Less;
            case LE:
                return ConditionFlag.LessEqual;
            case GE:
                return ConditionFlag.GreaterEqual;
            case GT:
                return ConditionFlag.Greater;
            case BE:
                return ConditionFlag.BelowEqual;
            case AE:
                return ConditionFlag.AboveEqual;
            case AT:
                return ConditionFlag.Above;
            case BT:
                return ConditionFlag.Below;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

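    /**
     * Maps a Graal {@link Condition} to the AMD64 condition flag for a float comparison.
     * ucomiss/ucomisd set ZF and CF like an unsigned integer compare, so the unsigned flags
     * (Below, Above, ...) are used for the ordered relations.
     */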
    private static ConditionFlag floatCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Below;
            case LE:
                return ConditionFlag.BelowEqual;
            case GE:
                return ConditionFlag.AboveEqual;
            case GT:
                return ConditionFlag.Above;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

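    /**
     * Returns whether the given condition flag evaluates to true after an unordered float
     * comparison: ucomiss/ucomisd set ZF = PF = CF = 1 and clear OF, SF and AF in that case,
     * which fixes the value of every flag-based condition.
     */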
    private static boolean trueOnUnordered(ConditionFlag condition) {
        switch (condition) {
            case AboveEqual:
            case NotEqual:
            case Above:
            case Less:
            case Overflow:
                return false;
            case Equal:
            case BelowEqual:
            case Below:
            case GreaterEqual:
            case NoOverflow:
                return true;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
}