1 /*
   2  * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 package org.graalvm.compiler.lir.amd64;
  24 
  25 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
  26 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
  27 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
  28 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
  29 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
  30 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
  31 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
  32 import static java.lang.Double.doubleToRawLongBits;
  33 import static java.lang.Float.floatToRawIntBits;
  34 import static jdk.vm.ci.code.ValueUtil.asRegister;
  35 import static jdk.vm.ci.code.ValueUtil.isRegister;
  36 import static jdk.vm.ci.code.ValueUtil.isStackSlot;
  37 
  38 import org.graalvm.compiler.core.common.NumUtil;
  39 import org.graalvm.compiler.asm.amd64.AMD64Address;
  40 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
  41 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp;
  42 import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
  43 import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
  44 import org.graalvm.compiler.core.common.type.DataPointerConstant;
  45 import org.graalvm.compiler.debug.GraalError;
  46 import org.graalvm.compiler.lir.LIRFrameState;
  47 import org.graalvm.compiler.lir.LIRInstructionClass;
  48 import org.graalvm.compiler.lir.Opcode;
  49 import org.graalvm.compiler.lir.StandardOp.LoadConstantOp;
  50 import org.graalvm.compiler.lir.StandardOp.NullCheck;
  51 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
  52 import org.graalvm.compiler.lir.VirtualStackSlot;
  53 import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
  54 
  55 import jdk.vm.ci.amd64.AMD64;
  56 import jdk.vm.ci.amd64.AMD64Kind;
  57 import jdk.vm.ci.code.Register;
  58 import jdk.vm.ci.code.StackSlot;
  59 import jdk.vm.ci.meta.AllocatableValue;
  60 import jdk.vm.ci.meta.Constant;
  61 import jdk.vm.ci.meta.JavaConstant;
  62 import jdk.vm.ci.meta.Value;
  63 
  64 public class AMD64Move {
  65 
    /**
     * Base class for value moves. Stores the {@link AMD64Kind} that selects the move width and
     * delegates code emission to the static {@link #move(AMD64Kind, CompilationResultBuilder, AMD64MacroAssembler, Value, Value)}
     * helper; subclasses supply the operands via {@code getResult()}/{@code getInput()}.
     */
    private abstract static class AbstractMoveOp extends AMD64LIRInstruction implements ValueMoveOp {
        public static final LIRInstructionClass<AbstractMoveOp> TYPE = LIRInstructionClass.create(AbstractMoveOp.class);

        // Platform kind that determines which mov flavor (movl/movq/movflt/movdbl) is emitted.
        private AMD64Kind moveKind;

        protected AbstractMoveOp(LIRInstructionClass<? extends AbstractMoveOp> c, AMD64Kind moveKind) {
            super(c);
            this.moveKind = moveKind;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            move(moveKind, crb, masm, getResult(), getInput());
        }
    }
  81 
    /**
     * Move whose result must end up in a register; the input may be a register or a stack slot.
     * The {@code HINT} flag on the result asks the register allocator to try to assign the input's
     * location, so the move can often be elided.
     */
    @Opcode("MOVE")
    public static final class MoveToRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveToRegOp> TYPE = LIRInstructionClass.create(MoveToRegOp.class);

        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG, STACK}) protected AllocatableValue input;

        public MoveToRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }
 105 
    /**
     * Move whose input must be in a register; the result may be a register or a stack slot.
     * Mirror image of {@link MoveToRegOp}: here the allocator hint is on the input.
     */
    @Opcode("MOVE")
    public static final class MoveFromRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveFromRegOp> TYPE = LIRInstructionClass.create(MoveFromRegOp.class);

        @Def({REG, STACK}) protected AllocatableValue result;
        @Use({REG, HINT}) protected AllocatableValue input;

        public MoveFromRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }
 129 
    /**
     * Materializes a {@link JavaConstant} into a register or a stack slot, dispatching to
     * {@link #const2reg} or {@link #const2stack}. Intentionally not {@code final} so that it can
     * be subclassed (e.g. by runtime-specific move ops).
     */
    @Opcode("MOVE")
    public static class MoveFromConstOp extends AMD64LIRInstruction implements LoadConstantOp {
        public static final LIRInstructionClass<MoveFromConstOp> TYPE = LIRInstructionClass.create(MoveFromConstOp.class);

        @Def({REG, STACK}) protected AllocatableValue result;
        private final JavaConstant input;

        public MoveFromConstOp(AllocatableValue result, JavaConstant input) {
            super(TYPE);
            this.result = result;
            this.input = input;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            if (isRegister(result)) {
                const2reg(crb, masm, asRegister(result), input);
            } else {
                // Only registers and stack slots are legal destinations (see @Def flags).
                assert isStackSlot(result);
                const2stack(crb, masm, result, input);
            }
        }

        @Override
        public Constant getConstant() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }
 163 
 164     @Opcode("STACKMOVE")
 165     public static final class AMD64StackMove extends AMD64LIRInstruction implements ValueMoveOp {
 166         public static final LIRInstructionClass<AMD64StackMove> TYPE = LIRInstructionClass.create(AMD64StackMove.class);
 167 
 168         @Def({STACK}) protected AllocatableValue result;
 169         @Use({STACK, HINT}) protected AllocatableValue input;
 170         @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private AllocatableValue backupSlot;
 171 
 172         private Register scratch;
 173 
 174         public AMD64StackMove(AllocatableValue result, AllocatableValue input, Register scratch, AllocatableValue backupSlot) {
 175             super(TYPE);
 176             this.result = result;
 177             this.input = input;
 178             this.backupSlot = backupSlot;
 179             this.scratch = scratch;
 180         }
 181 
 182         @Override
 183         public AllocatableValue getInput() {
 184             return input;
 185         }
 186 
 187         @Override
 188         public AllocatableValue getResult() {
 189             return result;
 190         }
 191 
 192         public Register getScratchRegister() {
 193             return scratch;
 194         }
 195 
 196         public AllocatableValue getBackupSlot() {
 197             return backupSlot;
 198         }
 199 
 200         @Override
 201         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
 202             AMD64Kind backupKind = (AMD64Kind) backupSlot.getPlatformKind();
 203             if (backupKind.isXMM()) {
 204                 // graal doesn't use vector values, so it's safe to backup using DOUBLE
 205                 backupKind = AMD64Kind.DOUBLE;
 206             }
 207 
 208             // backup scratch register
 209             reg2stack(backupKind, crb, masm, backupSlot, scratch);
 210             // move stack slot
 211             stack2reg((AMD64Kind) getInput().getPlatformKind(), crb, masm, scratch, getInput());
 212             reg2stack((AMD64Kind) getResult().getPlatformKind(), crb, masm, getResult(), scratch);
 213             // restore scratch register
 214             stack2reg(backupKind, crb, masm, scratch, backupSlot);
 215         }
 216     }
 217 
 218     @Opcode("MULTISTACKMOVE")
 219     public static final class AMD64MultiStackMove extends AMD64LIRInstruction {
 220         public static final LIRInstructionClass<AMD64MultiStackMove> TYPE = LIRInstructionClass.create(AMD64MultiStackMove.class);
 221 
 222         @Def({STACK}) protected AllocatableValue[] results;
 223         @Use({STACK}) protected Value[] inputs;
 224         @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private AllocatableValue backupSlot;
 225 
 226         private Register scratch;
 227 
 228         public AMD64MultiStackMove(AllocatableValue[] results, Value[] inputs, Register scratch, AllocatableValue backupSlot) {
 229             super(TYPE);
 230             this.results = results;
 231             this.inputs = inputs;
 232             this.backupSlot = backupSlot;
 233             this.scratch = scratch;
 234         }
 235 
 236         @Override
 237         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
 238             AMD64Kind backupKind = (AMD64Kind) backupSlot.getPlatformKind();
 239             if (backupKind.isXMM()) {
 240                 // graal doesn't use vector values, so it's safe to backup using DOUBLE
 241                 backupKind = AMD64Kind.DOUBLE;
 242             }
 243 
 244             // backup scratch register
 245             move(backupKind, crb, masm, backupSlot, scratch.asValue(backupSlot.getValueKind()));
 246             for (int i = 0; i < results.length; i++) {
 247                 Value input = inputs[i];
 248                 AllocatableValue result = results[i];
 249                 // move stack slot
 250                 move((AMD64Kind) input.getPlatformKind(), crb, masm, scratch.asValue(input.getValueKind()), input);
 251                 move((AMD64Kind) result.getPlatformKind(), crb, masm, result, scratch.asValue(result.getValueKind()));
 252             }
 253             // restore scratch register
 254             move(backupKind, crb, masm, scratch.asValue(backupSlot.getValueKind()), backupSlot);
 255         }
 256     }
 257 
    /**
     * Stack-to-stack move that needs no scratch register: it pushes the input slot onto the
     * machine stack and pops it into the result slot.
     */
    @Opcode("STACKMOVE")
    public static final class AMD64PushPopStackMove extends AMD64LIRInstruction implements ValueMoveOp {
        public static final LIRInstructionClass<AMD64PushPopStackMove> TYPE = LIRInstructionClass.create(AMD64PushPopStackMove.class);

        @Def({STACK}) protected AllocatableValue result;
        @Use({STACK, HINT}) protected AllocatableValue input;
        // Operand size used for both the PUSH and the POP.
        private final OperandSize size;

        public AMD64PushPopStackMove(OperandSize size, AllocatableValue result, AllocatableValue input) {
            super(TYPE);
            this.result = result;
            this.input = input;
            this.size = size;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64MOp.PUSH.emit(masm, size, (AMD64Address) crb.asAddress(input));
            AMD64MOp.POP.emit(masm, size, (AMD64Address) crb.asAddress(result));
        }
    }
 289 
    /**
     * Computes the effective address of {@code address} into {@code result} using {@code leaq}.
     * The address may be marked UNINITIALIZED since only the address computation, not the memory
     * contents, is used.
     */
    public static final class LeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaOp> TYPE = LIRInstructionClass.create(LeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({COMPOSITE, UNINITIALIZED}) protected AMD64AddressValue address;

        public LeaOp(AllocatableValue result, AMD64AddressValue address) {
            super(TYPE);
            this.result = result;
            this.address = address;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result, AMD64Kind.QWORD), address.toAddress());
        }
    }
 307 
    /**
     * Loads the address of a constant placed in the code's data section into {@code result}:
     * the constant is recorded via the compilation result builder and referenced with
     * {@code leaq}.
     */
    public static final class LeaDataOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaDataOp> TYPE = LIRInstructionClass.create(LeaDataOp.class);

        @Def({REG}) protected AllocatableValue result;
        private final DataPointerConstant data;

        public LeaDataOp(AllocatableValue result, DataPointerConstant data) {
            super(TYPE);
            this.result = result;
            this.data = data;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result), (AMD64Address) crb.recordDataReferenceInCode(data));
        }
    }
 325 
    /**
     * Loads the address of a stack slot into {@code result} with {@code leaq}. The slot may be
     * UNINITIALIZED because only its address is taken.
     */
    public static final class StackLeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<StackLeaOp> TYPE = LIRInstructionClass.create(StackLeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({STACK, UNINITIALIZED}) protected AllocatableValue slot;

        public StackLeaOp(AllocatableValue result, AllocatableValue slot) {
            super(TYPE);
            this.result = result;
            this.slot = slot;
            // Only real or virtual stack slots have a frame address to take.
            assert slot instanceof VirtualStackSlot || slot instanceof StackSlot;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result, AMD64Kind.QWORD), (AMD64Address) crb.asAddress(slot));
        }
    }
 344 
    /**
     * Emits a memory barrier; {@code barriers} is a bit mask of the required barrier kinds and is
     * interpreted by {@link AMD64MacroAssembler#membar}.
     */
    public static final class MembarOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<MembarOp> TYPE = LIRInstructionClass.create(MembarOp.class);

        private final int barriers;

        public MembarOp(final int barriers) {
            super(TYPE);
            this.barriers = barriers;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.membar(barriers);
        }
    }
 360 
    /**
     * Performs an implicit null check by touching the given address: the instruction position is
     * recorded so a segfault there can be translated into a Java exception using {@code state}.
     */
    public static final class NullCheckOp extends AMD64LIRInstruction implements NullCheck {
        public static final LIRInstructionClass<NullCheckOp> TYPE = LIRInstructionClass.create(NullCheckOp.class);

        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @State protected LIRFrameState state;

        public NullCheckOp(AMD64AddressValue address, LIRFrameState state) {
            super(TYPE);
            this.address = address;
            this.state = state;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // Record the position BEFORE emitting, so it points at the faulting instruction.
            crb.recordImplicitException(masm.position(), state);
            masm.nullCheck(address.toAddress());
        }

        @Override
        public Value getCheckedValue() {
            // The base register of the address is the value being null-checked.
            return address.base;
        }

        @Override
        public LIRFrameState getState() {
            return state;
        }
    }
 389 
    /**
     * Compare-and-swap using {@code (lock) cmpxchg}. The compare value and the result are
     * constrained to {@code rax} because {@code cmpxchg} implicitly compares against and writes
     * back through {@code rax}. Only DWORD and QWORD access kinds are supported.
     */
    @Opcode("CAS")
    public static final class CompareAndSwapOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CompareAndSwapOp> TYPE = LIRInstructionClass.create(CompareAndSwapOp.class);

        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue cmpValue;
        @Use protected AllocatableValue newValue;

        public CompareAndSwapOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.cmpValue = cmpValue;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            assert asRegister(cmpValue).equals(AMD64.rax) && asRegister(result).equals(AMD64.rax);

            // The lock prefix is only needed on multiprocessor targets.
            if (crb.target.isMP) {
                masm.lock();
            }
            switch (accessKind) {
                case DWORD:
                    masm.cmpxchgl(asRegister(newValue), address.toAddress());
                    break;
                case QWORD:
                    masm.cmpxchgq(asRegister(newValue), address.toAddress());
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }
 429 
    /**
     * Atomic fetch-and-add using {@code (lock) xadd}: {@code result} ends up holding the previous
     * memory value while memory is updated to the sum. The delta is first copied into the result
     * register, which {@code xadd} then exchanges with memory. Only DWORD and QWORD are supported.
     */
    @Opcode("ATOMIC_READ_AND_ADD")
    public static final class AtomicReadAndAddOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndAddOp> TYPE = LIRInstructionClass.create(AtomicReadAndAddOp.class);

        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        // @Alive: the address must stay valid while result is being written.
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue delta;

        public AtomicReadAndAddOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue delta) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.delta = delta;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // Place the addend into the result register; xadd swaps it with the old memory value.
            move(accessKind, crb, masm, result, delta);
            // The lock prefix is only needed on multiprocessor targets.
            if (crb.target.isMP) {
                masm.lock();
            }
            switch (accessKind) {
                case DWORD:
                    masm.xaddl(address.toAddress(), asRegister(result));
                    break;
                case QWORD:
                    masm.xaddq(address.toAddress(), asRegister(result));
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }
 466 
    /**
     * Atomic swap using {@code xchg}: {@code result} receives the previous memory value and the
     * memory location receives {@code newValue}. No explicit lock prefix is emitted because
     * {@code xchg} with a memory operand is implicitly locked on x86. Only DWORD and QWORD are
     * supported.
     */
    @Opcode("ATOMIC_READ_AND_WRITE")
    public static final class AtomicReadAndWriteOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndWriteOp> TYPE = LIRInstructionClass.create(AtomicReadAndWriteOp.class);

        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        // @Alive: the address must stay valid while result is being written.
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue newValue;

        public AtomicReadAndWriteOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // Place the new value into the result register; xchg swaps it with memory.
            move(accessKind, crb, masm, result, newValue);
            switch (accessKind) {
                case DWORD:
                    masm.xchgl(asRegister(result), address.toAddress());
                    break;
                case QWORD:
                    masm.xchgq(asRegister(result), address.toAddress());
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }
 500 
    /**
     * Convenience overload that derives the move kind from the result's platform kind.
     */
    public static void move(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        move((AMD64Kind) result.getPlatformKind(), crb, masm, result, input);
    }
 504 
 505     public static void move(AMD64Kind moveKind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
 506         if (isRegister(input)) {
 507             if (isRegister(result)) {
 508                 reg2reg(moveKind, masm, result, input);
 509             } else if (isStackSlot(result)) {
 510                 reg2stack(moveKind, crb, masm, result, asRegister(input));
 511             } else {
 512                 throw GraalError.shouldNotReachHere();
 513             }
 514         } else if (isStackSlot(input)) {
 515             if (isRegister(result)) {
 516                 stack2reg(moveKind, crb, masm, asRegister(result), input);
 517             } else {
 518                 throw GraalError.shouldNotReachHere();
 519             }
 520         } else if (isJavaConstant(input)) {
 521             if (isRegister(result)) {
 522                 const2reg(crb, masm, asRegister(result), asJavaConstant(input));
 523             } else if (isStackSlot(result)) {
 524                 const2stack(crb, masm, result, asJavaConstant(input));
 525             } else {
 526                 throw GraalError.shouldNotReachHere();
 527             }
 528         } else {
 529             throw GraalError.shouldNotReachHere();
 530         }
 531     }
 532 
 533     private static void reg2reg(AMD64Kind kind, AMD64MacroAssembler masm, Value result, Value input) {
 534         if (asRegister(input).equals(asRegister(result))) {
 535             return;
 536         }
 537         switch (kind) {
 538             case BYTE:
 539             case WORD:
 540             case DWORD:
 541                 masm.movl(asRegister(result), asRegister(input));
 542                 break;
 543             case QWORD:
 544                 masm.movq(asRegister(result), asRegister(input));
 545                 break;
 546             case SINGLE:
 547                 masm.movflt(asRegister(result, AMD64Kind.SINGLE), asRegister(input, AMD64Kind.SINGLE));
 548                 break;
 549             case DOUBLE:
 550                 masm.movdbl(asRegister(result, AMD64Kind.DOUBLE), asRegister(input, AMD64Kind.DOUBLE));
 551                 break;
 552             default:
 553                 throw GraalError.shouldNotReachHere("kind=" + kind);
 554         }
 555     }
 556 
 557     public static void reg2stack(AMD64Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Register input) {
 558         AMD64Address dest = (AMD64Address) crb.asAddress(result);
 559         switch (kind) {
 560             case BYTE:
 561                 masm.movb(dest, input);
 562                 break;
 563             case WORD:
 564                 masm.movw(dest, input);
 565                 break;
 566             case DWORD:
 567                 masm.movl(dest, input);
 568                 break;
 569             case QWORD:
 570                 masm.movq(dest, input);
 571                 break;
 572             case SINGLE:
 573                 masm.movflt(dest, input);
 574                 break;
 575             case DOUBLE:
 576                 masm.movsd(dest, input);
 577                 break;
 578             default:
 579                 throw GraalError.shouldNotReachHere();
 580         }
 581     }
 582 
 583     public static void stack2reg(AMD64Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, Value input) {
 584         AMD64Address src = (AMD64Address) crb.asAddress(input);
 585         switch (kind) {
 586             case BYTE:
 587                 masm.movsbl(result, src);
 588                 break;
 589             case WORD:
 590                 masm.movswl(result, src);
 591                 break;
 592             case DWORD:
 593                 masm.movl(result, src);
 594                 break;
 595             case QWORD:
 596                 masm.movq(result, src);
 597                 break;
 598             case SINGLE:
 599                 masm.movflt(result, src);
 600                 break;
 601             case DOUBLE:
 602                 masm.movdbl(result, src);
 603                 break;
 604             default:
 605                 throw GraalError.shouldNotReachHere();
 606         }
 607     }
 608 
 609     public static void const2reg(CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, JavaConstant input) {
 610         /*
 611          * Note: we use the kind of the input operand (and not the kind of the result operand)
 612          * because they don't match in all cases. For example, an object constant can be loaded to a
 613          * long register when unsafe casts occurred (e.g., for a write barrier where arithmetic
 614          * operations are then performed on the pointer).
 615          */
 616         switch (input.getJavaKind().getStackKind()) {
 617             case Int:
 618                 // Do not optimize with an XOR as this instruction may be between
 619                 // a CMP and a Jcc in which case the XOR will modify the condition
 620                 // flags and interfere with the Jcc.
 621                 masm.movl(result, input.asInt());
 622 
 623                 break;
 624             case Long:
 625                 // Do not optimize with an XOR as this instruction may be between
 626                 // a CMP and a Jcc in which case the XOR will modify the condition
 627                 // flags and interfere with the Jcc.
 628                 if (input.asLong() == (int) input.asLong()) {
 629                     // Sign extended to long
 630                     masm.movslq(result, (int) input.asLong());
 631                 } else if ((input.asLong() & 0xFFFFFFFFL) == input.asLong()) {
 632                     // Zero extended to long
 633                     masm.movl(result, (int) input.asLong());
 634                 } else {
 635                     masm.movq(result, input.asLong());
 636                 }
 637                 break;
 638             case Float:
 639                 // This is *not* the same as 'constant == 0.0f' in the case where constant is -0.0f
 640                 if (Float.floatToRawIntBits(input.asFloat()) == Float.floatToRawIntBits(0.0f)) {
 641                     masm.xorps(result, result);
 642                 } else {
 643                     masm.movflt(result, (AMD64Address) crb.asFloatConstRef(input));
 644                 }
 645                 break;
 646             case Double:
 647                 // This is *not* the same as 'constant == 0.0d' in the case where constant is -0.0d
 648                 if (Double.doubleToRawLongBits(input.asDouble()) == Double.doubleToRawLongBits(0.0d)) {
 649                     masm.xorpd(result, result);
 650                 } else {
 651                     masm.movdbl(result, (AMD64Address) crb.asDoubleConstRef(input));
 652                 }
 653                 break;
 654             case Object:
 655                 // Do not optimize with an XOR as this instruction may be between
 656                 // a CMP and a Jcc in which case the XOR will modify the condition
 657                 // flags and interfere with the Jcc.
 658                 if (input.isNull()) {
 659                     masm.movq(result, 0x0L);
 660                 } else if (crb.target.inlineObjects) {
 661                     crb.recordInlineDataInCode(input);
 662                     masm.movq(result, 0xDEADDEADDEADDEADL);
 663                 } else {
 664                     masm.movq(result, (AMD64Address) crb.recordDataReferenceInCode(input, 0));
 665                 }
 666                 break;
 667             default:
 668                 throw GraalError.shouldNotReachHere();
 669         }
 670     }
 671 
 672     public static boolean canMoveConst2Stack(JavaConstant input) {
 673         switch (input.getJavaKind().getStackKind()) {
 674             case Int:
 675                 break;
 676             case Long:
 677                 break;
 678             case Float:
 679                 break;
 680             case Double:
 681                 break;
 682             case Object:
 683                 if (input.isNull()) {
 684                     return true;
 685                 } else {
 686                     return false;
 687                 }
 688             default:
 689                 return false;
 690         }
 691         return true;
 692     }
 693 
    /**
     * Writes the constant {@code input} directly into the stack slot {@code result}. First the
     * constant is converted to a raw immediate (floats/doubles via their raw bit patterns, null
     * as 0), then a store of the width given by the result's platform kind is emitted. Callers
     * should have checked {@link #canMoveConst2Stack} first; non-null object constants are
     * rejected here.
     */
    public static void const2stack(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, JavaConstant input) {
        AMD64Address dest = (AMD64Address) crb.asAddress(result);
        final long imm;
        switch (input.getJavaKind().getStackKind()) {
            case Int:
                imm = input.asInt();
                break;
            case Long:
                imm = input.asLong();
                break;
            case Float:
                // Raw bits preserve NaN payloads and -0.0f exactly.
                imm = floatToRawIntBits(input.asFloat());
                break;
            case Double:
                imm = doubleToRawLongBits(input.asDouble());
                break;
            case Object:
                if (input.isNull()) {
                    imm = 0;
                } else {
                    throw GraalError.shouldNotReachHere("Non-null object constants must be in register");
                }
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }

        // Emit the store using the width of the destination slot.
        switch ((AMD64Kind) result.getPlatformKind()) {
            case BYTE:
                assert NumUtil.isByte(imm) : "Is not in byte range: " + imm;
                AMD64MIOp.MOVB.emit(masm, OperandSize.BYTE, dest, (int) imm);
                break;
            case WORD:
                assert NumUtil.isShort(imm) : "Is not in short range: " + imm;
                AMD64MIOp.MOV.emit(masm, OperandSize.WORD, dest, (int) imm);
                break;
            case DWORD:
            case SINGLE:
                assert NumUtil.isInt(imm) : "Is not in int range: " + imm;
                masm.movl(dest, (int) imm);
                break;
            case QWORD:
            case DOUBLE:
                masm.movlong(dest, imm);
                break;
            default:
                throw GraalError.shouldNotReachHere("Unknown result Kind: " + result.getPlatformKind());
        }
    }
 743 }