/*
 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package org.graalvm.compiler.lir.amd64;

import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isRegister;

import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.code.CompilationResult.JumpTable;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.StandardOp;
import org.graalvm.compiler.lir.StandardOp.BlockEndOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.SwitchStrategy.BaseSwitchClosure;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.Value;

public class AMD64ControlFlow {

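    /**
     * Emits a method return: tears down the current frame and returns to the caller. On
     * AVX-capable CPUs, {@code vzeroupper} is issued first because the caller (e.g. the
     * interpreter) may run SSE code, and returning with dirty upper AVX state would incur an
     * AVX-SSE transition penalty.
     */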
    public static final class ReturnOp extends AMD64BlockEndOp implements BlockEndOp {
        public static final LIRInstructionClass<ReturnOp> TYPE = LIRInstructionClass.create(ReturnOp.class);
        @Use({REG, ILLEGAL}) protected Value x;

        public ReturnOp(Value x) {
            super(TYPE);
            this.x = x;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            crb.frameContext.leave(crb);
            /*
             * We potentially return to the interpreter, and that's an AVX-SSE transition. The only
             * live value at this point should be the return value in either rax, or in xmm0 with
             * the upper half of the register unused, so we don't destroy any value here.
             */
            if (masm.supports(CPUFeature.AVX)) {
                masm.vzeroupper();
            }
            masm.ret(0);
        }
    }

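    /**
     * Conditional branch to one of two destinations based on a condition flag set by a preceding
     * compare. When neither destination is the fall-through successor, the conditional jump
     * targets the more probable destination so that the trailing unconditional jump is rarely
     * executed.
     */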
    public static class BranchOp extends AMD64BlockEndOp implements StandardOp.BranchOp {
        public static final LIRInstructionClass<BranchOp> TYPE = LIRInstructionClass.create(BranchOp.class);
        protected final ConditionFlag condition;
        protected final LabelRef trueDestination;
        protected final LabelRef falseDestination;

        private final double trueDestinationProbability;

        public BranchOp(Condition condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(intCond(condition), trueDestination, falseDestination, trueDestinationProbability);
        }

        public BranchOp(ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(TYPE, condition, trueDestination, falseDestination, trueDestinationProbability);
        }

        protected BranchOp(LIRInstructionClass<? extends BranchOp> c, ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(c);
            this.condition = condition;
            this.trueDestination = trueDestination;
            this.falseDestination = falseDestination;
            this.trueDestinationProbability = trueDestinationProbability;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            /*
             * The strategy for emitting jumps is: If either trueDestination or falseDestination is
             * the successor block, assume the block scheduler did the correct thing and jcc to the
             * other. Otherwise, we need a jcc followed by a jmp. Use the branch probability to make
             * sure it is more likely to branch on the jcc (= less likely to execute both the jcc
             * and the jmp instead of just the jcc). In the case of loops, that means the jcc is the
             * back-edge.
             */
            if (crb.isSuccessorEdge(trueDestination)) {
                jcc(masm, true, falseDestination);
            } else if (crb.isSuccessorEdge(falseDestination)) {
                jcc(masm, false, trueDestination);
            } else if (trueDestinationProbability < 0.5) {
                jcc(masm, true, falseDestination);
                masm.jmp(trueDestination.label());
            } else {
                jcc(masm, false, trueDestination);
                masm.jmp(falseDestination.label());
            }
        }

        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            masm.jcc(negate ? condition.negate() : condition, target.label());
        }
    }

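    /**
     * Conditional branch on the result of a floating-point comparison. Since UCOMIS-style compares
     * report an unordered result (a NaN operand) via the parity flag, the branch must route the
     * unordered case explicitly to the true or false destination (see {@code floatJcc} below).
     */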
    public static final class FloatBranchOp extends BranchOp {
        public static final LIRInstructionClass<FloatBranchOp> TYPE = LIRInstructionClass.create(FloatBranchOp.class);
        protected boolean unorderedIsTrue;

        public FloatBranchOp(Condition condition, boolean unorderedIsTrue, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(TYPE, floatCond(condition), trueDestination, falseDestination, trueDestinationProbability);
            this.unorderedIsTrue = unorderedIsTrue;
        }

        @Override
        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            floatJcc(masm, negate ? condition.negate() : condition, negate ? !unorderedIsTrue : unorderedIsTrue, target.label());
        }
    }

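    /**
     * Switch lowered to a sequence of compares and conditional jumps, with the shape (e.g.
     * sequential tests or a binary search over the key constants) chosen by the given
     * {@link SwitchStrategy}.
     */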
    public static class StrategySwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<StrategySwitchOp> TYPE = LIRInstructionClass.create(StrategySwitchOp.class);
        protected final Constant[] keyConstants;
        private final LabelRef[] keyTargets;
        private LabelRef defaultTarget;
        @Alive({REG}) protected Value key;
        @Temp({REG, ILLEGAL}) protected Value scratch;
        protected final SwitchStrategy strategy;

        public StrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            this(TYPE, strategy, keyTargets, defaultTarget, key, scratch);
        }

        protected StrategySwitchOp(LIRInstructionClass<? extends StrategySwitchOp> c, SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            super(c);
            this.strategy = strategy;
            this.keyConstants = strategy.getKeyConstants();
            this.keyTargets = keyTargets;
            this.defaultTarget = defaultTarget;
            this.key = key;
            this.scratch = scratch;
            assert keyConstants.length == keyTargets.length;
            assert keyConstants.length == strategy.keyProbabilities.length;
        }

        @Override
        public void emitCode(final CompilationResultBuilder crb, final AMD64MacroAssembler masm) {
            strategy.run(new SwitchClosure(asRegister(key), crb, masm));
        }

        public class SwitchClosure extends BaseSwitchClosure {

            protected final Register keyRegister;
            protected final CompilationResultBuilder crb;
            protected final AMD64MacroAssembler masm;

            protected SwitchClosure(Register keyRegister, CompilationResultBuilder crb, AMD64MacroAssembler masm) {
                super(crb, masm, keyTargets, defaultTarget);
                this.keyRegister = keyRegister;
                this.crb = crb;
                this.masm = masm;
            }

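            /**
             * Emits the compare of the key register against a case constant: an immediate compare
             * for int keys, a memory compare against a constant reference for long keys, and a
             * register compare via the scratch register for object keys.
             */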
            protected void emitComparison(Constant c) {
                JavaConstant jc = (JavaConstant) c;
                switch (jc.getJavaKind()) {
                    case Int:
                        long lc = jc.asLong();
                        assert NumUtil.isInt(lc);
                        masm.cmpl(keyRegister, (int) lc);
                        break;
                    case Long:
                        masm.cmpq(keyRegister, (AMD64Address) crb.asLongConstRef(jc));
                        break;
                    case Object:
                        AMD64Move.const2reg(crb, masm, asRegister(scratch), jc);
                        masm.cmpptr(keyRegister, asRegister(scratch));
                        break;
                    default:
                        throw new GraalError("switch only supported for int, long and object");
                }
            }

            @Override
            protected void conditionalJump(int index, Condition condition, Label target) {
                emitComparison(keyConstants[index]);
                masm.jcc(intCond(condition), target);
            }
        }
    }

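    /**
     * Switch lowered to a jump table of 32-bit offsets relative to the table base. The (possibly
     * rebased) index is bounds-checked against the key range, the table base is materialized with
     * a RIP-relative LEA whose displacement is patched once the table position is known, and
     * control transfers to the base plus the sign-extended table entry.
     */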
    public static final class TableSwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<TableSwitchOp> TYPE = LIRInstructionClass.create(TableSwitchOp.class);
        private final int lowKey;
        private final LabelRef defaultTarget;
        private final LabelRef[] targets;
        @Use protected Value index;
        @Temp({REG, HINT}) protected Value idxScratch;
        @Temp protected Value scratch;

        public TableSwitchOp(final int lowKey, final LabelRef defaultTarget, final LabelRef[] targets, Value index, Variable scratch, Variable idxScratch) {
            super(TYPE);
            this.lowKey = lowKey;
            this.defaultTarget = defaultTarget;
            this.targets = targets;
            this.index = index;
            this.scratch = scratch;
            this.idxScratch = idxScratch;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register indexReg = asRegister(index, AMD64Kind.DWORD);
            Register idxScratchReg = asRegister(idxScratch, AMD64Kind.DWORD);
            Register scratchReg = asRegister(scratch, AMD64Kind.QWORD);

            if (!indexReg.equals(idxScratchReg)) {
                masm.movl(idxScratchReg, indexReg);
            }

            // Compare index against jump table bounds
            int highKey = lowKey + targets.length - 1;
            if (lowKey != 0) {
                // subtract the low value from the switch value
                masm.subl(idxScratchReg, lowKey);
                masm.cmpl(idxScratchReg, highKey - lowKey);
            } else {
                masm.cmpl(idxScratchReg, highKey);
            }

            // Jump to default target if index is not within the jump table
            if (defaultTarget != null) {
                masm.jcc(ConditionFlag.Above, defaultTarget.label());
            }

            // Set scratch to address of jump table
            masm.leaq(scratchReg, new AMD64Address(AMD64.rip, 0));
            final int afterLea = masm.position();

            // Load jump table entry into scratch and jump to it
            masm.movslq(idxScratchReg, new AMD64Address(scratchReg, idxScratchReg, Scale.Times4, 0));
            masm.addq(scratchReg, idxScratchReg);
            masm.jmp(scratchReg);

            // Insert padding so that the jump table address is 4-byte aligned
            if ((masm.position() & 0x3) != 0) {
                masm.nop(4 - (masm.position() & 0x3));
            }

            // Patch LEA instruction above now that we know the position of the jump table
            // TODO this is ugly and should be done differently
            final int jumpTablePos = masm.position();
            final int leaDisplacementPosition = afterLea - 4;
            masm.emitInt(jumpTablePos - afterLea, leaDisplacementPosition);

            // Emit jump table entries
            for (LabelRef target : targets) {
                Label label = target.label();
                int offsetToJumpTableBase = masm.position() - jumpTablePos;
                if (label.isBound()) {
                    int imm32 = label.position() - jumpTablePos;
                    masm.emitInt(imm32);
                } else {
                    label.addPatchAt(masm.position());

                    masm.emitByte(0); // pseudo-opcode for jump table entry
                    masm.emitShort(offsetToJumpTableBase);
                    masm.emitByte(0); // padding to make jump table entry 4 bytes wide
                }
            }

            JumpTable jt = new JumpTable(jumpTablePos, lowKey, highKey, 4);
            crb.compilationResult.addAnnotation(jt);
        }
    }

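    /**
     * Branchless conditional move for integer conditions: the result is initialized with the false
     * value and then conditionally overwritten with the true value via CMOV.
     */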
    @Opcode("CMOVE")
    public static final class CondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CondMoveOp> TYPE = LIRInstructionClass.create(CondMoveOp.class);
        @Def({REG, HINT}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Use({REG, STACK, CONST}) protected Value falseValue;
        private final ConditionFlag condition;

        public CondMoveOp(Variable result, Condition condition, AllocatableValue trueValue, Value falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = intCond(condition);
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, false, condition, false, trueValue, falseValue);
        }
    }

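    /**
     * Branchless conditional move for floating-point conditions. Both inputs are kept alive in
     * registers; beyond the primary CMOV, a second CMOV on the parity flag may be emitted to fix
     * up the unordered case.
     */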
    @Opcode("CMOVE")
    public static final class FloatCondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<FloatCondMoveOp> TYPE = LIRInstructionClass.create(FloatCondMoveOp.class);
        @Def({REG}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Alive({REG}) protected Value falseValue;
        private final ConditionFlag condition;
        private final boolean unorderedIsTrue;

        public FloatCondMoveOp(Variable result, Condition condition, boolean unorderedIsTrue, Variable trueValue, Variable falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = floatCond(condition);
            this.unorderedIsTrue = unorderedIsTrue;
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, true, condition, unorderedIsTrue, trueValue, falseValue);
        }
    }

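    /**
     * Emits a conditional jump for a floating-point condition. If the condition flag alone would
     * give the wrong answer for unordered inputs, the parity flag (set when a compare is
     * unordered) is tested first, either jumping directly to the target or skipping over the main
     * jump.
     */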
    private static void floatJcc(AMD64MacroAssembler masm, ConditionFlag condition, boolean unorderedIsTrue, Label label) {
        Label endLabel = new Label();
        if (unorderedIsTrue && !trueOnUnordered(condition)) {
            masm.jcc(ConditionFlag.Parity, label);
        } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
            masm.jccb(ConditionFlag.Parity, endLabel);
        }
        masm.jcc(condition, label);
        masm.bind(endLabel);
    }

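    /**
     * Emits a branchless conditional move sequence: move the false value into the result, then
     * conditionally replace it with the true value. For floating-point conditions, an extra
     * parity-based CMOV corrects the result for unordered inputs.
     */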
    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, boolean isFloat, ConditionFlag condition, boolean unorderedIsTrue, Value trueValue,
                    Value falseValue) {
        // check that we don't overwrite an input operand before it is used.
        assert !result.equals(trueValue);

        AMD64Move.move(crb, masm, result, falseValue);
        cmove(crb, masm, result, condition, trueValue);

        if (isFloat) {
            if (unorderedIsTrue && !trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, trueValue);
            } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, falseValue);
            }
        }
    }

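    /**
     * Emits a single CMOV of {@code other} into {@code result}, either register-to-register or
     * from a memory operand (stack slot or constant reference), with the operand size chosen from
     * the platform kind. Sub-dword kinds are handled with a 32-bit CMOV (there is no byte-sized
     * CMOV on x86).
     */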
    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, ConditionFlag cond, Value other) {
        if (isRegister(other)) {
            assert !asRegister(other).equals(asRegister(result)) : "other already overwritten by previous move";
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), asRegister(other));
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), asRegister(other));
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        } else {
            AMD64Address addr = (AMD64Address) crb.asAddress(other);
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), addr);
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), addr);
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }

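    /**
     * Maps a Graal {@link Condition} for integer compares to the corresponding AMD64 condition
     * flag: signed flags for LT/LE/GE/GT and unsigned flags for BT/BE/AE/AT.
     */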
    private static ConditionFlag intCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Less;
            case LE:
                return ConditionFlag.LessEqual;
            case GE:
                return ConditionFlag.GreaterEqual;
            case GT:
                return ConditionFlag.Greater;
            case BE:
                return ConditionFlag.BelowEqual;
            case AE:
                return ConditionFlag.AboveEqual;
            case AT:
                return ConditionFlag.Above;
            case BT:
                return ConditionFlag.Below;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

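    /**
     * Maps a Graal {@link Condition} for floating-point compares to the corresponding AMD64
     * condition flag. Unsigned flags (Below, Above, ...) are used because UCOMIS-style compares
     * set ZF and CF like an unsigned integer compare.
     */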
    private static ConditionFlag floatCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Below;
            case LE:
                return ConditionFlag.BelowEqual;
            case GE:
                return ConditionFlag.AboveEqual;
            case GT:
                return ConditionFlag.Above;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

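    /**
     * Returns whether the given condition flag evaluates to true for an unordered floating-point
     * compare, which sets ZF, PF and CF and clears OF and SF.
     */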
    private static boolean trueOnUnordered(ConditionFlag condition) {
        switch (condition) {
            case AboveEqual:
            case NotEqual:
            case Above:
            case Less:
            case Overflow:
                return false;
            case Equal:
            case BelowEqual:
            case Below:
            case GreaterEqual:
            case NoOverflow:
                return true;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
}