/*
 * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package org.graalvm.compiler.lir.amd64;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;

import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.code.CompilationResult.JumpTable;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.StandardOp;
import org.graalvm.compiler.lir.StandardOp.BlockEndOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.SwitchStrategy.BaseSwitchClosure;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.Value;

public class AMD64ControlFlow {

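    /**
     * Returns from a method: leaves the current frame and emits {@code ret}, preceded by
     * {@code vzeroupper} on AVX-capable CPUs (see the AVX-SSE transition comment below).
     */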
    public static final class ReturnOp extends AMD64BlockEndOp implements BlockEndOp {
        public static final LIRInstructionClass<ReturnOp> TYPE = LIRInstructionClass.create(ReturnOp.class);
        @Use({REG, ILLEGAL}) protected Value x;

        public ReturnOp(Value x) {
            super(TYPE);
            this.x = x;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            crb.frameContext.leave(crb);
            /*
             * We potentially return to the interpreter, and that's an AVX-SSE transition. The only
             * live value at this point should be the return value in either rax, or in xmm0 with
             * the upper half of the register unused, so we don't destroy any value here.
             */
            if (masm.supports(CPUFeature.AVX)) {
                masm.vzeroupper();
            }
            masm.ret(0);
        }
    }

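    /**
     * Conditional branch with a true and a false successor. The emission strategy documented in
     * {@code emitCode} avoids the unconditional {@code jmp} whenever one of the destinations is
     * the immediate successor block.
     */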
    public static class BranchOp extends AMD64BlockEndOp implements StandardOp.BranchOp {
        public static final LIRInstructionClass<BranchOp> TYPE = LIRInstructionClass.create(BranchOp.class);
        protected final ConditionFlag condition;
        protected final LabelRef trueDestination;
        protected final LabelRef falseDestination;

        private final double trueDestinationProbability;

        public BranchOp(Condition condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(intCond(condition), trueDestination, falseDestination, trueDestinationProbability);
        }

        public BranchOp(ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(TYPE, condition, trueDestination, falseDestination, trueDestinationProbability);
        }

        protected BranchOp(LIRInstructionClass<? extends BranchOp> c, ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(c);
            this.condition = condition;
            this.trueDestination = trueDestination;
            this.falseDestination = falseDestination;
            this.trueDestinationProbability = trueDestinationProbability;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            boolean isNegated = false;
            int jccPos = masm.position();
            /*
             * The strategy for emitting jumps is: If either trueDestination or falseDestination is
             * the successor block, assume the block scheduler did the correct thing and jcc to the
             * other. Otherwise, we need a jcc followed by a jmp. Use the branch probability to make
             * sure it is more likely to branch on the jcc (= less likely to execute both the jcc
             * and the jmp instead of just the jcc). In the case of loops, that means the jcc is the
             * back-edge.
             */
            if (crb.isSuccessorEdge(trueDestination)) {
                jcc(masm, true, falseDestination);
                isNegated = true;
            } else if (crb.isSuccessorEdge(falseDestination)) {
                jcc(masm, false, trueDestination);
            } else if (trueDestinationProbability < 0.5) {
                jcc(masm, true, falseDestination);
                masm.jmp(trueDestination.label());
                isNegated = true;
            } else {
                jcc(masm, false, trueDestination);
                masm.jmp(falseDestination.label());
            }
            crb.recordBranch(jccPos, isNegated);
        }

        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            masm.jcc(negate ? condition.negate() : condition, target.label());
        }
    }

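    /**
     * Conditional branch on the result of a floating-point comparison. An unordered comparison
     * (at least one operand is NaN) sets the parity flag, so {@code floatJcc} emits an additional
     * parity-based jump to honor {@code unorderedIsTrue}.
     */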
    public static final class FloatBranchOp extends BranchOp {
        public static final LIRInstructionClass<FloatBranchOp> TYPE = LIRInstructionClass.create(FloatBranchOp.class);
        protected boolean unorderedIsTrue;

        public FloatBranchOp(Condition condition, boolean unorderedIsTrue, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(TYPE, floatCond(condition), trueDestination, falseDestination, trueDestinationProbability);
            this.unorderedIsTrue = unorderedIsTrue;
        }

        @Override
        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            floatJcc(masm, negate ? condition.negate() : condition, negate ? !unorderedIsTrue : unorderedIsTrue, target.label());
        }
    }

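    /**
     * Switch emitted as a sequence of key comparisons and conditional branches, in the order
     * chosen by the given {@link SwitchStrategy}.
     */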
    public static class StrategySwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<StrategySwitchOp> TYPE = LIRInstructionClass.create(StrategySwitchOp.class);
        protected final Constant[] keyConstants;
        private final LabelRef[] keyTargets;
        private LabelRef defaultTarget;
        @Alive({REG}) protected Value key;
        @Temp({REG, ILLEGAL}) protected Value scratch;
        protected final SwitchStrategy strategy;

        public StrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            this(TYPE, strategy, keyTargets, defaultTarget, key, scratch);
        }

        protected StrategySwitchOp(LIRInstructionClass<? extends StrategySwitchOp> c, SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            super(c);
            this.strategy = strategy;
            this.keyConstants = strategy.getKeyConstants();
            this.keyTargets = keyTargets;
            this.defaultTarget = defaultTarget;
            this.key = key;
            this.scratch = scratch;
            assert keyConstants.length == keyTargets.length;
            assert keyConstants.length == strategy.keyProbabilities.length;
        }

        @Override
        public void emitCode(final CompilationResultBuilder crb, final AMD64MacroAssembler masm) {
            strategy.run(new SwitchClosure(asRegister(key), crb, masm));
        }

        public class SwitchClosure extends BaseSwitchClosure {

            protected final Register keyRegister;
            protected final CompilationResultBuilder crb;
            protected final AMD64MacroAssembler masm;

            protected SwitchClosure(Register keyRegister, CompilationResultBuilder crb, AMD64MacroAssembler masm) {
                super(crb, masm, keyTargets, defaultTarget);
                this.keyRegister = keyRegister;
                this.crb = crb;
                this.masm = masm;
            }

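            /**
             * Emits the comparison of the key register against one case constant. A long key
             * cannot be encoded as a {@code cmpq} immediate and is therefore compared against an
             * in-memory constant; an object key is first materialized into the scratch register.
             */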
            protected void emitComparison(Constant c) {
                JavaConstant jc = (JavaConstant) c;
                switch (jc.getJavaKind()) {
                    case Int:
                        long lc = jc.asLong();
                        assert NumUtil.isInt(lc);
                        masm.cmpl(keyRegister, (int) lc);
                        break;
                    case Long:
                        masm.cmpq(keyRegister, (AMD64Address) crb.asLongConstRef(jc));
                        break;
                    case Object:
                        AMD64Move.const2reg(crb, masm, asRegister(scratch), jc);
                        masm.cmpptr(keyRegister, asRegister(scratch));
                        break;
                    default:
                        throw new GraalError("switch only supported for int, long and object");
                }
            }

            @Override
            protected void conditionalJump(int index, Condition condition, Label target) {
                emitComparison(keyConstants[index]);
                masm.jcc(intCond(condition), target);
            }
        }
    }

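    /**
     * Switch over a contiguous key range, emitted as an indexed jump through a table of 32-bit
     * offsets relative to the table base. The base address is obtained with a RIP-relative
     * {@code lea} whose displacement is patched once the table position is known.
     */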
    public static final class TableSwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<TableSwitchOp> TYPE = LIRInstructionClass.create(TableSwitchOp.class);
        private final int lowKey;
        private final LabelRef defaultTarget;
        private final LabelRef[] targets;
        @Use protected Value index;
        @Temp({REG, HINT}) protected Value idxScratch;
        @Temp protected Value scratch;

        public TableSwitchOp(final int lowKey, final LabelRef defaultTarget, final LabelRef[] targets, Value index, Variable scratch, Variable idxScratch) {
            super(TYPE);
            this.lowKey = lowKey;
            this.defaultTarget = defaultTarget;
            this.targets = targets;
            this.index = index;
            this.scratch = scratch;
            this.idxScratch = idxScratch;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register indexReg = asRegister(index, AMD64Kind.DWORD);
            Register idxScratchReg = asRegister(idxScratch, AMD64Kind.DWORD);
            Register scratchReg = asRegister(scratch, AMD64Kind.QWORD);

            if (!indexReg.equals(idxScratchReg)) {
                masm.movl(idxScratchReg, indexReg);
            }

            // Compare index against jump table bounds
            int highKey = lowKey + targets.length - 1;
            if (lowKey != 0) {
                // subtract the low value from the switch value
                masm.subl(idxScratchReg, lowKey);
                masm.cmpl(idxScratchReg, highKey - lowKey);
            } else {
                masm.cmpl(idxScratchReg, highKey);
            }

            // Jump to default target if index is not within the jump table
            if (defaultTarget != null) {
                masm.jcc(ConditionFlag.Above, defaultTarget.label());
            }

            // Set scratch to address of jump table
            masm.leaq(scratchReg, new AMD64Address(AMD64.rip, 0));
            final int afterLea = masm.position();

            // Load jump table entry into scratch and jump to it
            masm.movslq(idxScratchReg, new AMD64Address(scratchReg, idxScratchReg, Scale.Times4, 0));
            masm.addq(scratchReg, idxScratchReg);
            masm.jmp(scratchReg);

            // Insert padding so that the jump table address is 4-byte aligned
            if ((masm.position() & 0x3) != 0) {
                masm.nop(4 - (masm.position() & 0x3));
            }

            // Patch LEA instruction above now that we know the position of the jump table
            // TODO this is ugly and should be done differently
            final int jumpTablePos = masm.position();
            final int leaDisplacementPosition = afterLea - 4;
            masm.emitInt(jumpTablePos - afterLea, leaDisplacementPosition);

            // Emit jump table entries
            for (LabelRef target : targets) {
                Label label = target.label();
                int offsetToJumpTableBase = masm.position() - jumpTablePos;
                if (label.isBound()) {
                    int imm32 = label.position() - jumpTablePos;
                    masm.emitInt(imm32);
                } else {
                    label.addPatchAt(masm.position());

                    masm.emitByte(0); // pseudo-opcode for jump table entry
                    masm.emitShort(offsetToJumpTableBase);
                    masm.emitByte(0); // padding to make jump table entry 4 bytes wide
                }
            }

            JumpTable jt = new JumpTable(jumpTablePos, lowKey, highKey, 4);
            crb.compilationResult.addAnnotation(jt);
        }
    }

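    /**
     * Materializes an integer condition flag into the result register via {@code SETcc}.
     */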
    @Opcode("SETcc")
    public static final class CondSetOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CondSetOp> TYPE = LIRInstructionClass.create(CondSetOp.class);
        @Def({REG, HINT}) protected Value result;
        private final ConditionFlag condition;

        public CondSetOp(Variable result, Condition condition) {
            super(TYPE);
            this.result = result;
            this.condition = intCond(condition);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            setcc(masm, result, condition);
        }
    }

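    /**
     * Materializes the result of a floating-point comparison into the result register via
     * {@code SETcc}. No parity fix-up is emitted here, so the value produced for unordered inputs
     * is whatever the mapped condition flag yields.
     */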
    @Opcode("SETcc")
    public static final class FloatCondSetOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<FloatCondSetOp> TYPE = LIRInstructionClass.create(FloatCondSetOp.class);
        @Def({REG, HINT}) protected Value result;
        private final ConditionFlag condition;

        public FloatCondSetOp(Variable result, Condition condition) {
            super(TYPE);
            this.result = result;
            this.condition = floatCond(condition);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            setcc(masm, result, condition);
        }
    }

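    /**
     * Branchless conditional move: the result is initialized with {@code falseValue} and then
     * conditionally overwritten with {@code trueValue} via {@code CMOVcc}.
     */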
    @Opcode("CMOVE")
    public static final class CondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CondMoveOp> TYPE = LIRInstructionClass.create(CondMoveOp.class);
        @Def({REG, HINT}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Use({REG, STACK, CONST}) protected Value falseValue;
        private final ConditionFlag condition;

        public CondMoveOp(Variable result, Condition condition, AllocatableValue trueValue, Value falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = intCond(condition);
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, false, condition, false, trueValue, falseValue);
        }
    }

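    /**
     * Conditional move keyed on a floating-point comparison. Both inputs must stay alive in
     * registers because an additional parity-based {@code CMOVcc} may be needed to correct the
     * result for unordered (NaN) inputs.
     */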
    @Opcode("CMOVE")
    public static final class FloatCondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<FloatCondMoveOp> TYPE = LIRInstructionClass.create(FloatCondMoveOp.class);
        @Def({REG}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Alive({REG}) protected Value falseValue;
        private final ConditionFlag condition;
        private final boolean unorderedIsTrue;

        public FloatCondMoveOp(Variable result, Condition condition, boolean unorderedIsTrue, Variable trueValue, Variable falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = floatCond(condition);
            this.unorderedIsTrue = unorderedIsTrue;
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, true, condition, unorderedIsTrue, trueValue, falseValue);
        }
    }

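    /**
     * Emits a conditional jump for a floating-point comparison. Since an unordered comparison
     * sets the parity flag, an extra parity jump either takes the branch (unordered is true but
     * the condition flag would not fire) or skips the main jump (unordered is false but the
     * condition flag would fire).
     */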
    private static void floatJcc(AMD64MacroAssembler masm, ConditionFlag condition, boolean unorderedIsTrue, Label label) {
        Label endLabel = new Label();
        if (unorderedIsTrue && !trueOnUnordered(condition)) {
            masm.jcc(ConditionFlag.Parity, label);
        } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
            masm.jccb(ConditionFlag.Parity, endLabel);
        }
        masm.jcc(condition, label);
        masm.bind(endLabel);
    }

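    /**
     * Emits a branchless conditional move: {@code result} is assigned {@code falseValue} and then
     * conditionally replaced with {@code trueValue}. For float comparisons, one more move on the
     * parity flag corrects the result in the unordered case.
     */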
    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, boolean isFloat, ConditionFlag condition, boolean unorderedIsTrue, Value trueValue,
                    Value falseValue) {
        // check that we don't overwrite an input operand before it is used.
        assert !result.equals(trueValue);

        AMD64Move.move(crb, masm, result, falseValue);
        cmove(crb, masm, result, condition, trueValue);

        if (isFloat) {
            if (unorderedIsTrue && !trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, trueValue);
            } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, falseValue);
            }
        }
    }

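    /**
     * Emits a single {@code cmovl}/{@code cmovq}, chosen by the platform kind of {@code other},
     * with either a register or a memory operand (e.g. a stack slot) as the source.
     */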
    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, ConditionFlag cond, Value other) {
        if (isRegister(other)) {
            assert !asRegister(other).equals(asRegister(result)) : "other already overwritten by previous move";
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), asRegister(other));
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), asRegister(other));
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        } else {
            AMD64Address addr = (AMD64Address) crb.asAddress(other);
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), addr);
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), addr);
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }

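    /**
     * Materializes {@code cond} into the result register, using the 32-bit or 64-bit form of the
     * macro-assembler's set operation depending on the result kind.
     */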
    private static void setcc(AMD64MacroAssembler masm, Value result, ConditionFlag cond) {
        switch ((AMD64Kind) result.getPlatformKind()) {
            case BYTE:
            case WORD:
            case DWORD:
                masm.setl(cond, asRegister(result));
                break;
            case QWORD:
                masm.setq(cond, asRegister(result));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

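    /**
     * Maps a Graal {@link Condition} to the AMD64 condition flag for integer comparisons: signed
     * conditions (LT, LE, GE, GT) map to sign-based flags, unsigned conditions (BT, BE, AE, AT)
     * to carry-based flags.
     */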
    private static ConditionFlag intCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Less;
            case LE:
                return ConditionFlag.LessEqual;
            case GE:
                return ConditionFlag.GreaterEqual;
            case GT:
                return ConditionFlag.Greater;
            case BE:
                return ConditionFlag.BelowEqual;
            case AE:
                return ConditionFlag.AboveEqual;
            case AT:
                return ConditionFlag.Above;
            case BT:
                return ConditionFlag.Below;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

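    /**
     * Maps a Graal {@link Condition} to the AMD64 condition flag for floating-point comparisons.
     * {@code ucomiss}/{@code ucomisd} report their result in ZF and CF, so the unsigned flags
     * (Below, Above) are used even for ordered relational comparisons.
     */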
    private static ConditionFlag floatCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Below;
            case LE:
                return ConditionFlag.BelowEqual;
            case GE:
                return ConditionFlag.AboveEqual;
            case GT:
                return ConditionFlag.Above;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

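    /**
     * Returns whether the given condition evaluates to true when the floating-point comparison
     * was unordered, i.e. when at least one operand was NaN.
     */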
    public static boolean trueOnUnordered(Condition condition) {
        return trueOnUnordered(floatCond(condition));
    }

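    /*
     * An unordered comparison sets ZF, PF and CF and clears SF and OF, which determines each
     * flag's value in the unordered case.
     */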
    private static boolean trueOnUnordered(ConditionFlag condition) {
        switch (condition) {
            case AboveEqual:
            case NotEqual:
            case Above:
            case Less:
            case Overflow:
                return false;
            case Equal:
            case BelowEqual:
            case Below:
            case GreaterEqual:
            case NoOverflow:
                return true;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
}