
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64LIRGenerator.java

  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 package org.graalvm.compiler.core.amd64;
  25 
  26 import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
  27 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
  28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.DWORD;
  29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PD;
  30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PS;
  31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.QWORD;
  32 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
  33 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
  34 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
  35 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
  36 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
  37 
  38 import org.graalvm.compiler.core.common.NumUtil;
  39 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
  40 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
  41 import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
  42 import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
  43 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
  44 import org.graalvm.compiler.core.common.LIRKind;
  45 import org.graalvm.compiler.core.common.calc.Condition;
  46 import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
  47 import org.graalvm.compiler.core.common.spi.LIRKindTool;
  48 import org.graalvm.compiler.debug.GraalError;
  49 import org.graalvm.compiler.lir.ConstantValue;
  50 import org.graalvm.compiler.lir.LIRFrameState;
  51 import org.graalvm.compiler.lir.LIRInstruction;
  52 import org.graalvm.compiler.lir.LIRValueUtil;
  53 import org.graalvm.compiler.lir.LabelRef;
  54 import org.graalvm.compiler.lir.StandardOp.JumpOp;
  55 import org.graalvm.compiler.lir.StandardOp.SaveRegistersOp;
  56 import org.graalvm.compiler.lir.SwitchStrategy;
  57 import org.graalvm.compiler.lir.Variable;
  58 import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
  59 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
  60 import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
  61 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
  62 import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
  63 import org.graalvm.compiler.lir.amd64.AMD64Call;
  64 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
  65 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
  66 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
  67 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
  68 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
  69 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
  70 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
  71 import org.graalvm.compiler.lir.amd64.AMD64Move;
  72 import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
  73 import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
  74 import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
  75 import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
  76 import org.graalvm.compiler.lir.amd64.AMD64StringIndexOfOp;
  77 import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
  78 import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
  79 import org.graalvm.compiler.lir.gen.LIRGenerationResult;
  80 import org.graalvm.compiler.lir.gen.LIRGenerator;
  81 import org.graalvm.compiler.phases.util.Providers;
  82 
  83 import jdk.vm.ci.amd64.AMD64;
  84 import jdk.vm.ci.amd64.AMD64Kind;
  85 import jdk.vm.ci.code.CallingConvention;
  86 import jdk.vm.ci.code.Register;
  87 import jdk.vm.ci.code.RegisterValue;


 240         ValueKind<?> kind = newValue.getValueKind();
 241         Variable result = newVariable(kind);
 242         AMD64AddressValue addressValue = asAddressValue(address);
 243         append(new AMD64Move.AtomicReadAndWriteOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(newValue)));
 244         return result;
 245     }
 246 
 247     @Override
 248     public void emitNullCheck(Value address, LIRFrameState state) {
 249         append(new AMD64Move.NullCheckOp(asAddressValue(address), state));
 250     }
 251 
 252     @Override
 253     public void emitJump(LabelRef label) {
 254         assert label != null;
 255         append(new JumpOp(label));
 256     }
 257 
 258     @Override
 259     public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
 260         boolean mirrored = emitCompare(cmpKind, left, right);
 261         Condition finalCondition = mirrored ? cond.mirror() : cond;
 262         if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
 263             append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
 264         } else {
 265             append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
 266         }
 267     }
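
For context on the unorderedIsTrue flag used above: x86 float compares report NaN operands as "unordered", while Java defines every ordered comparison involving NaN as false. A minimal runnable illustration of the Java semantics that FloatBranchOp must preserve (editorial demo, not part of the change):

    public class NaNSemantics {
        public static void main(String[] args) {
            double nan = Double.NaN;
            System.out.println(nan < 1.0);  // false: NaN compares as unordered
            System.out.println(nan >= 1.0); // false as well
            System.out.println(nan != 1.0); // true: the only relation NaN satisfies
        }
    }
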
 268 
 269     public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
 270                     double trueLabelProbability) {
 271         boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
 272         Condition finalCondition = mirrored ? cond.mirror() : cond;
 273         if (cmpKind.isXMM()) {
 274             append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
 275         } else {
 276             append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
 277         }
 278     }
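
A note on the mirrored flag in the memory variant: CMP only accepts an immediate as its second operand, so when the original comparison has a constant on the left (the MemoryConstOp path shown further down), the memory operand must be emitted first and the condition mirrored. A runnable sketch of the pattern, with the lowering in the comment stated as an assumption:

    public class MemoryCmpDemo {
        int f = 9;

        public static void main(String[] args) {
            MemoryCmpDemo obj = new MemoryCmpDemo();
            // "7 < obj.f" may lower to: cmp dword ptr [obj + fOffset], 7 / jg ...
            // (assumed form; the memory operand comes first, so '<' is mirrored to '>')
            System.out.println(7 < obj.f); // true
        }
    }
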
 279 
 280     @Override
 281     public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpLIRKind, double overflowProbability) {
 282         append(new BranchOp(ConditionFlag.Overflow, overflow, noOverflow, overflowProbability));
 283     }
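
emitOverflowCheckBranch branches directly on the CPU overflow flag (a jo/jno pair) left behind by the preceding arithmetic LIR op. The Java-level pattern this serves is exact arithmetic; whether a particular VM build intrinsifies it through this exact path is an assumption here:

    public class OverflowDemo {
        public static void main(String[] args) {
            try {
                Math.addExact(Integer.MAX_VALUE, 1); // signed overflow sets OF on x86
            } catch (ArithmeticException e) {
                System.out.println("overflow detected");
            }
        }
    }
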
 284 
 285     @Override
 286     public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
 287         emitIntegerTest(left, right);
 288         append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
 289     }
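
emitIntegerTest (below) lowers to the TEST instruction, which ANDs its operands to set the flags without storing a result; branching on Condition.EQ therefore means "jump when (a & b) == 0". A runnable sketch of the pattern this serves, with MASK as a hypothetical constant:

    public class IntegerTestDemo {
        static final int MASK = 0x10; // hypothetical flag bit

        static boolean isClear(int flags) {
            // (flags & MASK) == 0 can lower to: test flags, MASK / jz trueDest (assumed form)
            return (flags & MASK) == 0;
        }

        public static void main(String[] args) {
            System.out.println(isClear(0x01)); // true
            System.out.println(isClear(0x11)); // false
        }
    }
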
 290 
 291     @Override
 292     public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
 293         boolean mirrored = emitCompare(cmpKind, left, right);
 294         Condition finalCondition = mirrored ? cond.mirror() : cond;
 295 
 296         Variable result = newVariable(trueValue.getValueKind());
 297         if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
 298             append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(trueValue), load(falseValue)));
 299         } else {
 300             append(new CondMoveOp(result, finalCondition, load(trueValue), loadNonConst(falseValue)));
 301         }
 302         return result;
 303     }
 304 
 305     @Override
 306     public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
 307         emitIntegerTest(left, right);
 308         Variable result = newVariable(trueValue.getValueKind());
 309         append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
 310         return result;
 311     }
 312 
 313     private void emitIntegerTest(Value a, Value b) {
 314         assert ((AMD64Kind) a.getPlatformKind()).isInteger();
 315         OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
 316         if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
 317             append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
 318         } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
 319             append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
 320         } else if (isAllocatableValue(b)) {


 377                 append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
 378                 return true;
 379             } else {
 380                 return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
 381             }
 382         }
 383     }
 384 
 385     private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
 386         AMD64RMOp op = CMP.getRMOpcode(size);
 387         append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
 388         return false;
 389     }
 390 
 391     /**
 392      * This method emits the compare instruction, and may reorder the operands. It returns true if
 393      * it did so.
 394      *
 395      * @param a the left operand of the comparison
 396      * @param b the right operand of the comparison
 397      * @return true if the left and right operands were switched, false otherwise
 398      */
 399     private boolean emitCompare(PlatformKind cmpKind, Value a, Value b) {
 400         Variable left;
 401         Value right;
 402         boolean mirrored;
 403         if (LIRValueUtil.isVariable(b)) {
 404             left = load(b);
 405             right = loadNonConst(a);
 406             mirrored = true;
 407         } else {
 408             left = load(a);
 409             right = loadNonConst(b);
 410             mirrored = false;
 411         }
 412         ((AMD64ArithmeticLIRGeneratorTool) arithmeticLIRGen).emitCompareOp((AMD64Kind) cmpKind, left, right);
 413         return mirrored;
 414     }
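
The boolean protocol above leans on the identity cond(a, b) == cond.mirror()(b, a), for example '<' mirrors to '>'. A small runnable check of that identity for the signed orderings, independent of the Graal Condition class (run with -ea):

    public class MirrorDemo {
        public static void main(String[] args) {
            int[] samples = {-3, 0, 5};
            for (int a : samples) {
                for (int b : samples) {
                    // swapping operands and mirroring the relation preserves the result
                    assert (a < b) == (b > a);
                    assert (a <= b) == (b >= a);
                }
            }
            System.out.println("mirror identity holds");
        }
    }
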
 415 
 416     @Override
 417     public void emitMembar(int barriers) {
 418         int necessaryBarriers = target().arch.requiredBarriers(barriers);
 419         if (target().isMP && necessaryBarriers != 0) {
 420             append(new MembarOp(necessaryBarriers));
 421         }
 422     }
 423 
 424     public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args, int numberOfFloatingPointArguments);
 425 
 426     @Override
 427     protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
 428         long maxOffset = linkage.getMaxCallTargetOffset();
 429         if (maxOffset != (int) maxOffset && !GeneratePIC.getValue(getResult().getLIR().getOptions())) {
 430             append(new AMD64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
 431         } else {
 432             append(new AMD64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
 433         }




  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 package org.graalvm.compiler.core.amd64;
  25 
  26 import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
  27 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
  28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.DWORD;
  29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PD;
  30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PS;
  31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.QWORD;
  32 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
  33 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
  34 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
  35 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
  36 import static org.graalvm.compiler.lir.LIRValueUtil.isIntConstant;
  37 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
  38 
  39 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
  40 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
  41 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
  42 import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
  43 import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
  44 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
  45 import org.graalvm.compiler.core.common.LIRKind;
  46 import org.graalvm.compiler.core.common.NumUtil;
  47 import org.graalvm.compiler.core.common.calc.Condition;
  48 import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
  49 import org.graalvm.compiler.core.common.spi.LIRKindTool;
  50 import org.graalvm.compiler.debug.GraalError;
  51 import org.graalvm.compiler.lir.ConstantValue;
  52 import org.graalvm.compiler.lir.LIRFrameState;
  53 import org.graalvm.compiler.lir.LIRInstruction;
  54 import org.graalvm.compiler.lir.LIRValueUtil;
  55 import org.graalvm.compiler.lir.LabelRef;
  56 import org.graalvm.compiler.lir.StandardOp.JumpOp;
  57 import org.graalvm.compiler.lir.StandardOp.SaveRegistersOp;
  58 import org.graalvm.compiler.lir.SwitchStrategy;
  59 import org.graalvm.compiler.lir.Variable;
  60 import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
  61 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
  62 import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
  63 import org.graalvm.compiler.lir.amd64.AMD64Binary;
  64 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
  65 import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
  66 import org.graalvm.compiler.lir.amd64.AMD64Call;
  67 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow;
  68 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
  69 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
  70 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondSetOp;
  71 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
  72 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
  73 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondSetOp;
  74 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
  75 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
  76 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
  77 import org.graalvm.compiler.lir.amd64.AMD64Move;
  78 import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
  79 import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
  80 import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
  81 import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
  82 import org.graalvm.compiler.lir.amd64.AMD64StringIndexOfOp;
  83 import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
  84 import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
  85 import org.graalvm.compiler.lir.gen.LIRGenerationResult;
  86 import org.graalvm.compiler.lir.gen.LIRGenerator;
  87 import org.graalvm.compiler.phases.util.Providers;
  88 
  89 import jdk.vm.ci.amd64.AMD64;
  90 import jdk.vm.ci.amd64.AMD64Kind;
  91 import jdk.vm.ci.code.CallingConvention;
  92 import jdk.vm.ci.code.Register;
  93 import jdk.vm.ci.code.RegisterValue;


 246         ValueKind<?> kind = newValue.getValueKind();
 247         Variable result = newVariable(kind);
 248         AMD64AddressValue addressValue = asAddressValue(address);
 249         append(new AMD64Move.AtomicReadAndWriteOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(newValue)));
 250         return result;
 251     }
 252 
 253     @Override
 254     public void emitNullCheck(Value address, LIRFrameState state) {
 255         append(new AMD64Move.NullCheckOp(asAddressValue(address), state));
 256     }
 257 
 258     @Override
 259     public void emitJump(LabelRef label) {
 260         assert label != null;
 261         append(new JumpOp(label));
 262     }
 263 
 264     @Override
 265     public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
 266         Condition finalCondition = emitCompare(cmpKind, left, right, cond);
 267         if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
 268             append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
 269         } else {
 270             append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
 271         }
 272     }
 273 
 274     public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
 275                     double trueLabelProbability) {
 276         boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
 277         Condition finalCondition = mirrored ? cond.mirror() : cond;
 278         if (cmpKind.isXMM()) {
 279             append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
 280         } else {
 281             append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
 282         }
 283     }
 284 
 285     @Override
 286     public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpLIRKind, double overflowProbability) {
 287         append(new BranchOp(ConditionFlag.Overflow, overflow, noOverflow, overflowProbability));
 288     }
 289 
 290     @Override
 291     public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
 292         emitIntegerTest(left, right);
 293         append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
 294     }
 295 
 296     @Override
 297     public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
 298         boolean isFloatComparison = cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE;
 299 
 300         Condition finalCondition = cond;
 301         Value finalTrueValue = trueValue;
 302         Value finalFalseValue = falseValue;
 303         if (isFloatComparison) {
 304             // eliminate the parity check in case of a float comparison
 305             Value finalLeft = left;
 306             Value finalRight = right;
 307             if (unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition)) {
 308                 if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.mirror())) {
 309                     finalCondition = finalCondition.mirror();
 310                     finalLeft = right;
 311                     finalRight = left;
 312                 } else if (finalCondition != Condition.EQ && finalCondition != Condition.NE) {
 313                     // negating EQ and NE does not make any sense as we would need to negate
 314                     // unorderedIsTrue as well (otherwise, we would no longer fulfill the Java
 315                     // NaN semantics)
 316                     assert unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate());
 317                     finalCondition = finalCondition.negate();
 318                     finalTrueValue = falseValue;
 319                     finalFalseValue = trueValue;
 320                 }
 321             }
 322             emitRawCompare(cmpKind, finalLeft, finalRight);
 323         } else {
 324             finalCondition = emitCompare(cmpKind, left, right, cond);
 325         }
 326 
 327         boolean isParityCheckNecessary = isFloatComparison && unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition);
 328         Variable result = newVariable(finalTrueValue.getValueKind());
 329         if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 1) && isIntConstant(finalFalseValue, 0)) {
 330             if (isFloatComparison) {
 331                 append(new FloatCondSetOp(result, finalCondition));
 332             } else {
 333                 append(new CondSetOp(result, finalCondition));
 334             }
 335         } else if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 0) && isIntConstant(finalFalseValue, 1)) {
 336             if (isFloatComparison) {
 337                 if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate())) {
 338                     append(new FloatCondSetOp(result, finalCondition.negate()));
 339                 } else {
 340                     append(new FloatCondSetOp(result, finalCondition));
 341                     Variable negatedResult = newVariable(result.getValueKind());
 342                     append(new AMD64Binary.ConstOp(AMD64BinaryArithmetic.XOR, OperandSize.get(result.getPlatformKind()), negatedResult, result, 1));
 343                     result = negatedResult;
 344                 }
 345             } else {
 346                 append(new CondSetOp(result, finalCondition.negate()));
 347             }
 348         } else if (isFloatComparison) {
 349             append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(finalTrueValue), load(finalFalseValue)));
 350         } else {
 351             append(new CondMoveOp(result, finalCondition, load(finalTrueValue), loadNonConst(finalFalseValue)));
 352         }
 353         return result;
 354     }
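
The new 1/0 special cases above avoid a conditional move entirely: SETcc materializes the flag bit directly into the result, and the negated float case reuses SETcc followed by the XOR ConstOp, which is safe because the value is known to be 0 or 1. A minimal runnable model of that xor-with-1 trick:

    public class SetccXorDemo {
        public static void main(String[] args) {
            int a = 3;
            int b = 7;
            int bit = (a < b) ? 1 : 0; // candidate for cmp/setl (assumed lowering)
            int negated = bit ^ 1;     // flips a known 0/1 value, as the XOR ConstOp does
            System.out.println(bit + " " + negated); // prints: 1 0
        }
    }
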
 355 
 356     @Override
 357     public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
 358         emitIntegerTest(left, right);
 359         Variable result = newVariable(trueValue.getValueKind());
 360         append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
 361         return result;
 362     }
 363 
 364     private void emitIntegerTest(Value a, Value b) {
 365         assert ((AMD64Kind) a.getPlatformKind()).isInteger();
 366         OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
 367         if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
 368             append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
 369         } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
 370             append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
 371         } else if (isAllocatableValue(b)) {


 428                 append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
 429                 return true;
 430             } else {
 431                 return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
 432             }
 433         }
 434     }
 435 
 436     private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
 437         AMD64RMOp op = CMP.getRMOpcode(size);
 438         append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
 439         return false;
 440     }
 441 
  442     /**
  443      * This method emits the compare instruction, and may reorder the operands. It returns the
  444      * condition that must be used when consuming the flags.
  445      *
  446      * @param a the left operand of the comparison
  447      * @param b the right operand of the comparison
  448      * @param cond the condition of the comparison
  449      * @return the comparison condition, mirrored if the left and right operands were switched
  450      */
 451     private Condition emitCompare(PlatformKind cmpKind, Value a, Value b, Condition cond) {
 452         if (LIRValueUtil.isVariable(b)) {
 453             emitRawCompare(cmpKind, b, a);
 454             return cond.mirror();
 455         } else {
 456             emitRawCompare(cmpKind, a, b);
 457             return cond;
 458         }
 459     }
 460 
 461     private void emitRawCompare(PlatformKind cmpKind, Value left, Value right) {
 462         ((AMD64ArithmeticLIRGeneratorTool) arithmeticLIRGen).emitCompareOp((AMD64Kind) cmpKind, load(left), loadNonConst(right));
 463     }
 464 
 465     @Override
 466     public void emitMembar(int barriers) {
 467         int necessaryBarriers = target().arch.requiredBarriers(barriers);
 468         if (target().isMP && necessaryBarriers != 0) {
 469             append(new MembarOp(necessaryBarriers));
 470         }
 471     }
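
emitMembar filters the requested barriers through the target's requiredBarriers and emits nothing on uniprocessor targets; under x86's TSO memory model only StoreLoad normally needs an actual fence instruction. A runnable Java-level trigger (which instruction the VM picks, e.g. mfence or a locked add, is implementation-dependent):

    import java.lang.invoke.VarHandle;

    public class FenceDemo {
        public static void main(String[] args) {
            VarHandle.fullFence(); // requests a full barrier, including StoreLoad
            System.out.println("fence issued");
        }
    }
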
 472 
 473     public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args, int numberOfFloatingPointArguments);
 474 
 475     @Override
 476     protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
 477         long maxOffset = linkage.getMaxCallTargetOffset();
 478         if (maxOffset != (int) maxOffset && !GeneratePIC.getValue(getResult().getLIR().getOptions())) {
 479             append(new AMD64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
 480         } else {
 481             append(new AMD64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
 482         }
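
The far/near choice above hinges on whether the largest possible call displacement fits the 32-bit rel32 field of a direct CALL; maxOffset != (int) maxOffset is exactly that fit test (the far variant instead materializing the target in a register is stated here as an assumption about the op's implementation). A runnable restatement of the check:

    public class Rel32Fit {
        static boolean fitsRel32(long offset) {
            return offset == (int) offset; // true iff offset is within [-2^31, 2^31 - 1]
        }

        public static void main(String[] args) {
            System.out.println(fitsRel32(1L << 20)); // true: near call suffices
            System.out.println(fitsRel32(1L << 40)); // false: far call needed
        }
    }
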