/*
 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package org.graalvm.compiler.lir.amd64;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;

import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.code.CompilationResult.JumpTable;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.StandardOp;
import org.graalvm.compiler.lir.StandardOp.BlockEndOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.SwitchStrategy.BaseSwitchClosure;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.Value;

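/**
 * LIR instructions and helpers for AMD64 control flow: returns, conditional branches on integer
 * and floating-point conditions, strategy- and table-based switches, and conditional set (SETcc)
 * and conditional move (CMOVcc) operations.
 */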
public class AMD64ControlFlow {

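    /**
     * Emits the frame epilogue followed by {@code ret}. On AVX-capable CPUs a {@code vzeroupper}
     * is issued first, since the caller may be SSE code (e.g. the interpreter) and mixing dirty
     * upper AVX state with SSE instructions incurs a transition penalty.
     */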
    public static final class ReturnOp extends AMD64BlockEndOp implements BlockEndOp {
        public static final LIRInstructionClass<ReturnOp> TYPE = LIRInstructionClass.create(ReturnOp.class);
        @Use({REG, ILLEGAL}) protected Value x;

        public ReturnOp(Value x) {
            super(TYPE);
            this.x = x;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            crb.frameContext.leave(crb);
            /*
             * We potentially return to the interpreter, and that's an AVX-SSE transition. The only
             * live value at this point should be the return value in either rax, or in xmm0 with
             * the upper half of the register unused, so we don't destroy any value here.
             */
            if (masm.supports(CPUFeature.AVX)) {
                masm.vzeroupper();
            }
            masm.ret(0);
        }
    }

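    /**
     * Conditional branch on an integer condition. Emits either a single {@code jcc} (when one
     * destination is the immediate successor block) or a {@code jcc} followed by an unconditional
     * {@code jmp}, using the branch probability to put the likelier destination on the
     * {@code jcc}.
     */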
    public static class BranchOp extends AMD64BlockEndOp implements StandardOp.BranchOp {
        public static final LIRInstructionClass<BranchOp> TYPE = LIRInstructionClass.create(BranchOp.class);
        protected final ConditionFlag condition;
        protected final LabelRef trueDestination;
        protected final LabelRef falseDestination;

        private final double trueDestinationProbability;

        public BranchOp(Condition condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(intCond(condition), trueDestination, falseDestination, trueDestinationProbability);
        }

        public BranchOp(ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            this(TYPE, condition, trueDestination, falseDestination, trueDestinationProbability);
        }

        protected BranchOp(LIRInstructionClass<? extends BranchOp> c, ConditionFlag condition, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(c);
            this.condition = condition;
            this.trueDestination = trueDestination;
            this.falseDestination = falseDestination;
            this.trueDestinationProbability = trueDestinationProbability;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            boolean isNegated = false;
            int jccPos = masm.position();
            /*
             * The strategy for emitting jumps is: If either trueDestination or falseDestination is
             * the successor block, assume the block scheduler did the correct thing and jcc to the
             * other. Otherwise, we need a jcc followed by a jmp. Use the branch probability to make
             * sure it is more likely to branch on the jcc (= less likely to execute both the jcc
             * and the jmp instead of just the jcc). In the case of loops, that means the jcc is the
             * back-edge.
             */
            if (crb.isSuccessorEdge(trueDestination)) {
                jcc(masm, true, falseDestination);
                isNegated = true;
            } else if (crb.isSuccessorEdge(falseDestination)) {
                jcc(masm, false, trueDestination);
            } else if (trueDestinationProbability < 0.5) {
                jcc(masm, true, falseDestination);
                masm.jmp(trueDestination.label());
                isNegated = true;
            } else {
                jcc(masm, false, trueDestination);
                masm.jmp(falseDestination.label());
            }
            crb.recordBranch(jccPos, isNegated);
        }

        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            masm.jcc(negate ? condition.negate() : condition, target.label());
        }
    }

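    /**
     * Conditional branch on a floating-point comparison. {@code unorderedIsTrue} selects which
     * destination receives the unordered (NaN) case, which is routed via the parity flag in
     * {@code floatJcc}.
     */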
    public static final class FloatBranchOp extends BranchOp {
        public static final LIRInstructionClass<FloatBranchOp> TYPE = LIRInstructionClass.create(FloatBranchOp.class);
        protected boolean unorderedIsTrue;

        public FloatBranchOp(Condition condition, boolean unorderedIsTrue, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
            super(TYPE, floatCond(condition), trueDestination, falseDestination, trueDestinationProbability);
            this.unorderedIsTrue = unorderedIsTrue;
        }

        @Override
        protected void jcc(AMD64MacroAssembler masm, boolean negate, LabelRef target) {
            floatJcc(masm, negate ? condition.negate() : condition, negate ? !unorderedIsTrue : unorderedIsTrue, target.label());
        }
    }

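    /**
     * Switch implemented as a sequence of comparisons and conditional jumps in the order chosen by
     * the given {@link SwitchStrategy}. Int keys are compared against immediates, long keys
     * against a constant-pool reference, and object keys via the scratch register.
     */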
    public static class StrategySwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<StrategySwitchOp> TYPE = LIRInstructionClass.create(StrategySwitchOp.class);
        protected final Constant[] keyConstants;
        private final LabelRef[] keyTargets;
        private LabelRef defaultTarget;
        @Alive({REG}) protected Value key;
        @Temp({REG, ILLEGAL}) protected Value scratch;
        protected final SwitchStrategy strategy;

        public StrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            this(TYPE, strategy, keyTargets, defaultTarget, key, scratch);
        }

        protected StrategySwitchOp(LIRInstructionClass<? extends StrategySwitchOp> c, SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Value key, Value scratch) {
            super(c);
            this.strategy = strategy;
            this.keyConstants = strategy.getKeyConstants();
            this.keyTargets = keyTargets;
            this.defaultTarget = defaultTarget;
            this.key = key;
            this.scratch = scratch;
            assert keyConstants.length == keyTargets.length;
            assert keyConstants.length == strategy.keyProbabilities.length;
        }

        @Override
        public void emitCode(final CompilationResultBuilder crb, final AMD64MacroAssembler masm) {
            strategy.run(new SwitchClosure(asRegister(key), crb, masm));
        }

        public class SwitchClosure extends BaseSwitchClosure {

            protected final Register keyRegister;
            protected final CompilationResultBuilder crb;
            protected final AMD64MacroAssembler masm;

            protected SwitchClosure(Register keyRegister, CompilationResultBuilder crb, AMD64MacroAssembler masm) {
                super(crb, masm, keyTargets, defaultTarget);
                this.keyRegister = keyRegister;
                this.crb = crb;
                this.masm = masm;
            }

            protected void emitComparison(Constant c) {
                JavaConstant jc = (JavaConstant) c;
                switch (jc.getJavaKind()) {
                    case Int:
                        long lc = jc.asLong();
                        assert NumUtil.isInt(lc);
                        masm.cmpl(keyRegister, (int) lc);
                        break;
                    case Long:
                        masm.cmpq(keyRegister, (AMD64Address) crb.asLongConstRef(jc));
                        break;
                    case Object:
                        AMD64Move.const2reg(crb, masm, asRegister(scratch), jc);
                        masm.cmpptr(keyRegister, asRegister(scratch));
                        break;
                    default:
                        throw new GraalError("switch only supported for int, long and object");
                }
            }

            @Override
            protected void conditionalJump(int index, Condition condition, Label target) {
                emitComparison(keyConstants[index]);
                masm.jcc(intCond(condition), target);
            }
        }
    }

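    /**
     * Switch implemented as an indirect jump through a table of 32-bit offsets that is addressed
     * RIP-relatively and emitted directly after the dispatch code. The emitted pattern is roughly
     * (register names are illustrative):
     *
     * <pre>
     *   subl   idxScratch, lowKey             // only if lowKey != 0
     *   cmpl   idxScratch, highKey - lowKey
     *   ja     defaultTarget                  // only if a default target exists
     *   leaq   scratch, [rip + tableDisp]     // tableDisp is patched in below
     *   movslq idxScratch, [scratch + idxScratch*4]
     *   addq   scratch, idxScratch
     *   jmp    scratch
     * </pre>
     */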
    public static final class TableSwitchOp extends AMD64BlockEndOp {
        public static final LIRInstructionClass<TableSwitchOp> TYPE = LIRInstructionClass.create(TableSwitchOp.class);
        private final int lowKey;
        private final LabelRef defaultTarget;
        private final LabelRef[] targets;
        @Use protected Value index;
        @Temp({REG, HINT}) protected Value idxScratch;
        @Temp protected Value scratch;

        public TableSwitchOp(final int lowKey, final LabelRef defaultTarget, final LabelRef[] targets, Value index, Variable scratch, Variable idxScratch) {
            super(TYPE);
            this.lowKey = lowKey;
            this.defaultTarget = defaultTarget;
            this.targets = targets;
            this.index = index;
            this.scratch = scratch;
            this.idxScratch = idxScratch;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register indexReg = asRegister(index, AMD64Kind.DWORD);
            Register idxScratchReg = asRegister(idxScratch, AMD64Kind.DWORD);
            Register scratchReg = asRegister(scratch, AMD64Kind.QWORD);

            if (!indexReg.equals(idxScratchReg)) {
                masm.movl(idxScratchReg, indexReg);
            }

            // Compare index against jump table bounds
            int highKey = lowKey + targets.length - 1;
            if (lowKey != 0) {
                // subtract the low value from the switch value
                masm.subl(idxScratchReg, lowKey);
                masm.cmpl(idxScratchReg, highKey - lowKey);
            } else {
                masm.cmpl(idxScratchReg, highKey);
            }

            // Jump to default target if index is not within the jump table
            if (defaultTarget != null) {
                masm.jcc(ConditionFlag.Above, defaultTarget.label());
            }

            // Set scratch to address of jump table
            masm.leaq(scratchReg, new AMD64Address(AMD64.rip, 0));
            final int afterLea = masm.position();

            // Load jump table entry into scratch and jump to it
            masm.movslq(idxScratchReg, new AMD64Address(scratchReg, idxScratchReg, Scale.Times4, 0));
            masm.addq(scratchReg, idxScratchReg);
            masm.jmp(scratchReg);

            // Insert padding so that the jump table address is 4-byte aligned
            if ((masm.position() & 0x3) != 0) {
                masm.nop(4 - (masm.position() & 0x3));
            }

            // Patch the LEA instruction above now that the position of the jump table is known
            // TODO this is ugly and should be done differently
            final int jumpTablePos = masm.position();
            final int leaDisplacementPosition = afterLea - 4;
            masm.emitInt(jumpTablePos - afterLea, leaDisplacementPosition);

            // Emit jump table entries
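            // Each entry is the 32-bit offset from the jump table base to its target label. For a
            // label that is not yet bound, a 4-byte placeholder is emitted instead (pseudo-opcode
            // byte, 16-bit offset back to the jump table base, padding byte) and patched to the
            // real offset once the label is bound.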
            for (LabelRef target : targets) {
                Label label = target.label();
                int offsetToJumpTableBase = masm.position() - jumpTablePos;
                if (label.isBound()) {
                    int imm32 = label.position() - jumpTablePos;
                    masm.emitInt(imm32);
                } else {
                    label.addPatchAt(masm.position());

                    masm.emitByte(0); // pseudo-opcode for jump table entry
                    masm.emitShort(offsetToJumpTableBase);
                    masm.emitByte(0); // padding to make jump table entry 4 bytes wide
                }
            }

            JumpTable jt = new JumpTable(jumpTablePos, lowKey, highKey, 4);
            crb.compilationResult.addAnnotation(jt);
        }
    }

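    /**
     * Materializes an integer condition as 0 or 1 in {@code result} via SETcc.
     */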
    @Opcode("SETcc")
    public static final class CondSetOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CondSetOp> TYPE = LIRInstructionClass.create(CondSetOp.class);
        @Def({REG, HINT}) protected Value result;
        private final ConditionFlag condition;

        public CondSetOp(Variable result, Condition condition) {
            super(TYPE);
            this.result = result;
            this.condition = intCond(condition);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            setcc(masm, result, condition);
        }
    }

    @Opcode("SETcc")
    public static final class FloatCondSetOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<FloatCondSetOp> TYPE = LIRInstructionClass.create(FloatCondSetOp.class);
        @Def({REG, HINT}) protected Value result;
        private final ConditionFlag condition;

        public FloatCondSetOp(Variable result, Condition condition) {
            super(TYPE);
            this.result = result;
            this.condition = floatCond(condition);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            setcc(masm, result, condition);
        }
    }

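    /**
     * Conditional move: {@code result} is first loaded with {@code falseValue} and then
     * conditionally overwritten with {@code trueValue} via CMOVcc, so {@code result} must not
     * alias {@code trueValue}.
     */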
    @Opcode("CMOVE")
    public static final class CondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CondMoveOp> TYPE = LIRInstructionClass.create(CondMoveOp.class);
        @Def({REG, HINT}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Use({REG, STACK, CONST}) protected Value falseValue;
        private final ConditionFlag condition;

        public CondMoveOp(Variable result, Condition condition, AllocatableValue trueValue, Value falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = intCond(condition);
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, false, condition, false, trueValue, falseValue);
        }
    }

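    /**
     * Conditional move on a floating-point condition. Both input values must stay alive across the
     * instruction because the unordered (NaN) case may require a second, parity-conditioned CMOVcc
     * after the primary one.
     */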
    @Opcode("CMOVE")
    public static final class FloatCondMoveOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<FloatCondMoveOp> TYPE = LIRInstructionClass.create(FloatCondMoveOp.class);
        @Def({REG}) protected Value result;
        @Alive({REG}) protected Value trueValue;
        @Alive({REG}) protected Value falseValue;
        private final ConditionFlag condition;
        private final boolean unorderedIsTrue;

        public FloatCondMoveOp(Variable result, Condition condition, boolean unorderedIsTrue, Variable trueValue, Variable falseValue) {
            super(TYPE);
            this.result = result;
            this.condition = floatCond(condition);
            this.unorderedIsTrue = unorderedIsTrue;
            this.trueValue = trueValue;
            this.falseValue = falseValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            cmove(crb, masm, result, true, condition, unorderedIsTrue, trueValue, falseValue);
        }
    }

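    /*
     * Emits a conditional jump for a floating-point comparison. ucomiss/ucomisd set ZF, PF and CF
     * all to 1 on an unordered result (a NaN operand), so the parity flag identifies the unordered
     * case: if unordered should be taken but the condition is false on unordered, an extra "jp
     * label" is emitted first; if unordered must not be taken but the condition is true on
     * unordered, a short "jp endLabel" skips past the main jcc.
     */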
    private static void floatJcc(AMD64MacroAssembler masm, ConditionFlag condition, boolean unorderedIsTrue, Label label) {
        Label endLabel = new Label();
        if (unorderedIsTrue && !trueOnUnordered(condition)) {
            masm.jcc(ConditionFlag.Parity, label);
        } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
            masm.jccb(ConditionFlag.Parity, endLabel);
        }
        masm.jcc(condition, label);
        masm.bind(endLabel);
    }

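    /*
     * Emits "move result, falseValue" followed by "cmovcc result, trueValue". For floating-point
     * conditions an additional parity-conditioned cmov routes the unordered (NaN) case to
     * whichever value unorderedIsTrue requires, when the primary condition flag does not already
     * have the desired truth value on unordered inputs.
     */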
    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, boolean isFloat, ConditionFlag condition, boolean unorderedIsTrue, Value trueValue,
                    Value falseValue) {
        // check that we don't overwrite an input operand before it is used.
        assert !result.equals(trueValue);

        AMD64Move.move(crb, masm, result, falseValue);
        cmove(crb, masm, result, condition, trueValue);

        if (isFloat) {
            if (unorderedIsTrue && !trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, trueValue);
            } else if (!unorderedIsTrue && trueOnUnordered(condition)) {
                cmove(crb, masm, result, ConditionFlag.Parity, falseValue);
            }
        }
    }

    private static void cmove(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, ConditionFlag cond, Value other) {
        if (isRegister(other)) {
            assert !asRegister(other).equals(asRegister(result)) : "other already overwritten by previous move";
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), asRegister(other));
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), asRegister(other));
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        } else {
            AMD64Address addr = (AMD64Address) crb.asAddress(other);
            switch ((AMD64Kind) other.getPlatformKind()) {
                case BYTE:
                case WORD:
                case DWORD:
                    masm.cmovl(cond, asRegister(result), addr);
                    break;
                case QWORD:
                    masm.cmovq(cond, asRegister(result), addr);
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }

    private static void setcc(AMD64MacroAssembler masm, Value result, ConditionFlag cond) {
        switch ((AMD64Kind) result.getPlatformKind()) {
            case BYTE:
            case WORD:
            case DWORD:
                masm.setl(cond, asRegister(result));
                break;
            case QWORD:
                masm.setq(cond, asRegister(result));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

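    /*
     * Maps a condition on integer operands to a flag: the signed relations use the signed flags
     * (Less, Greater, ...) while the unsigned relations BT, BE, AE and AT use the carry-based
     * flags (Below, Above, ...).
     */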
    private static ConditionFlag intCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Less;
            case LE:
                return ConditionFlag.LessEqual;
            case GE:
                return ConditionFlag.GreaterEqual;
            case GT:
                return ConditionFlag.Greater;
            case BE:
                return ConditionFlag.BelowEqual;
            case AE:
                return ConditionFlag.AboveEqual;
            case AT:
                return ConditionFlag.Above;
            case BT:
                return ConditionFlag.Below;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

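    /*
     * Maps a condition on floating-point operands to a flag. ucomiss/ucomisd set ZF and CF like an
     * unsigned integer compare, so the ordered relations map to the unsigned condition flags.
     */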
    private static ConditionFlag floatCond(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.Equal;
            case NE:
                return ConditionFlag.NotEqual;
            case LT:
                return ConditionFlag.Below;
            case LE:
                return ConditionFlag.BelowEqual;
            case GE:
                return ConditionFlag.AboveEqual;
            case GT:
                return ConditionFlag.Above;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

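    /*
     * An unordered floating-point compare sets ZF, PF and CF to 1 and clears SF and OF, so Equal,
     * Below, BelowEqual, GreaterEqual and NoOverflow evaluate to true on unordered inputs, while
     * their complements evaluate to false.
     */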
    public static boolean trueOnUnordered(Condition condition) {
        return trueOnUnordered(floatCond(condition));
    }

    private static boolean trueOnUnordered(ConditionFlag condition) {
        switch (condition) {
            case AboveEqual:
            case NotEqual:
            case Above:
            case Less:
            case Overflow:
                return false;
            case Equal:
            case BelowEqual:
            case Below:
            case GreaterEqual:
            case NoOverflow:
                return true;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
}