1 /*
   2  * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 package org.graalvm.compiler.lir.amd64;
  24 
  25 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag.Equal;
  26 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
  27 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
  28 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
  29 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
  30 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
  31 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
  32 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
  33 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
  34 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
  35 import static java.lang.Double.doubleToRawLongBits;
  36 import static java.lang.Float.floatToRawIntBits;
  37 import static jdk.vm.ci.code.ValueUtil.asRegister;
  38 import static jdk.vm.ci.code.ValueUtil.isRegister;
  39 import static jdk.vm.ci.code.ValueUtil.isStackSlot;
  40 
  41 import org.graalvm.compiler.asm.Label;
  42 import org.graalvm.compiler.core.common.CompressEncoding;
  43 import org.graalvm.compiler.core.common.LIRKind;
  44 import org.graalvm.compiler.core.common.NumUtil;
  45 import org.graalvm.compiler.asm.amd64.AMD64Address;
  46 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
  47 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp;
  48 import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
  49 import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
  50 import org.graalvm.compiler.core.common.spi.LIRKindTool;
  51 import org.graalvm.compiler.core.common.type.DataPointerConstant;
  52 import org.graalvm.compiler.debug.GraalError;
  53 import org.graalvm.compiler.lir.LIRFrameState;
  54 import org.graalvm.compiler.lir.LIRInstructionClass;
  55 import org.graalvm.compiler.lir.Opcode;
  56 import org.graalvm.compiler.lir.StandardOp.LoadConstantOp;
  57 import org.graalvm.compiler.lir.StandardOp.NullCheck;
  58 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
  59 import org.graalvm.compiler.lir.VirtualStackSlot;
  60 import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
  61 
  62 import jdk.vm.ci.amd64.AMD64;
  63 import jdk.vm.ci.amd64.AMD64Kind;
  64 import jdk.vm.ci.code.Register;
  65 import jdk.vm.ci.code.StackSlot;
  66 import jdk.vm.ci.meta.AllocatableValue;
  67 import jdk.vm.ci.meta.Constant;
  68 import jdk.vm.ci.meta.JavaConstant;
  69 import jdk.vm.ci.meta.Value;
  70 
  71 public class AMD64Move {
  72 
  73     private abstract static class AbstractMoveOp extends AMD64LIRInstruction implements ValueMoveOp {
  74         public static final LIRInstructionClass<AbstractMoveOp> TYPE = LIRInstructionClass.create(AbstractMoveOp.class);
  75 
  76         private AMD64Kind moveKind;
  77 
  78         protected AbstractMoveOp(LIRInstructionClass<? extends AbstractMoveOp> c, AMD64Kind moveKind) {
  79             super(c);
  80             this.moveKind = moveKind;
  81         }
  82 
  83         @Override
  84         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
  85             move(moveKind, crb, masm, getResult(), getInput());
  86         }
  87     }
  88 
    /**
     * Move whose destination is constrained to a register; the source may be a register or a
     * stack slot. The {@code HINT} flag on the result asks the register allocator to try to
     * assign result and input the same register so the move becomes a no-op.
     */
    @Opcode("MOVE")
    public static final class MoveToRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveToRegOp> TYPE = LIRInstructionClass.create(MoveToRegOp.class);

        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG, STACK}) protected AllocatableValue input;

        public MoveToRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }
 112 
    /**
     * Move whose source is constrained to a register; the destination may be a register or a
     * stack slot. Mirror image of {@link MoveToRegOp}, with the coalescing {@code HINT} placed
     * on the input instead of the result.
     */
    @Opcode("MOVE")
    public static final class MoveFromRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveFromRegOp> TYPE = LIRInstructionClass.create(MoveFromRegOp.class);

        @Def({REG, STACK}) protected AllocatableValue result;
        @Use({REG, HINT}) protected AllocatableValue input;

        public MoveFromRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }
 136 
    /**
     * Materializes a {@link JavaConstant} into a register or directly into a stack slot.
     * Whether a given constant may target a stack slot is decided elsewhere via
     * {@link #canMoveConst2Stack(JavaConstant)}.
     */
    @Opcode("MOVE")
    public static class MoveFromConstOp extends AMD64LIRInstruction implements LoadConstantOp {
        public static final LIRInstructionClass<MoveFromConstOp> TYPE = LIRInstructionClass.create(MoveFromConstOp.class);

        @Def({REG, STACK}) protected AllocatableValue result;
        private final JavaConstant input;

        public MoveFromConstOp(AllocatableValue result, JavaConstant input) {
            super(TYPE);
            this.result = result;
            this.input = input;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // Dispatch on where the allocator placed the result: register or stack slot.
            if (isRegister(result)) {
                const2reg(crb, masm, asRegister(result), input);
            } else {
                assert isStackSlot(result);
                const2stack(crb, masm, result, input);
            }
        }

        @Override
        public Constant getConstant() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }
 170 
 171     @Opcode("STACKMOVE")
 172     public static final class AMD64StackMove extends AMD64LIRInstruction implements ValueMoveOp {
 173         public static final LIRInstructionClass<AMD64StackMove> TYPE = LIRInstructionClass.create(AMD64StackMove.class);
 174 
 175         @Def({STACK}) protected AllocatableValue result;
 176         @Use({STACK, HINT}) protected AllocatableValue input;
 177         @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private AllocatableValue backupSlot;
 178 
 179         private Register scratch;
 180 
 181         public AMD64StackMove(AllocatableValue result, AllocatableValue input, Register scratch, AllocatableValue backupSlot) {
 182             super(TYPE);
 183             this.result = result;
 184             this.input = input;
 185             this.backupSlot = backupSlot;
 186             this.scratch = scratch;
 187         }
 188 
 189         @Override
 190         public AllocatableValue getInput() {
 191             return input;
 192         }
 193 
 194         @Override
 195         public AllocatableValue getResult() {
 196             return result;
 197         }
 198 
 199         public Register getScratchRegister() {
 200             return scratch;
 201         }
 202 
 203         public AllocatableValue getBackupSlot() {
 204             return backupSlot;
 205         }
 206 
 207         @Override
 208         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
 209             AMD64Kind backupKind = (AMD64Kind) backupSlot.getPlatformKind();
 210             if (backupKind.isXMM()) {
 211                 // graal doesn't use vector values, so it's safe to backup using DOUBLE
 212                 backupKind = AMD64Kind.DOUBLE;
 213             }
 214 
 215             // backup scratch register
 216             reg2stack(backupKind, crb, masm, backupSlot, scratch);
 217             // move stack slot
 218             stack2reg((AMD64Kind) getInput().getPlatformKind(), crb, masm, scratch, getInput());
 219             reg2stack((AMD64Kind) getResult().getPlatformKind(), crb, masm, getResult(), scratch);
 220             // restore scratch register
 221             stack2reg(backupKind, crb, masm, scratch, backupSlot);
 222         }
 223     }
 224 
 225     @Opcode("MULTISTACKMOVE")
 226     public static final class AMD64MultiStackMove extends AMD64LIRInstruction {
 227         public static final LIRInstructionClass<AMD64MultiStackMove> TYPE = LIRInstructionClass.create(AMD64MultiStackMove.class);
 228 
 229         @Def({STACK}) protected AllocatableValue[] results;
 230         @Use({STACK}) protected Value[] inputs;
 231         @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private AllocatableValue backupSlot;
 232 
 233         private Register scratch;
 234 
 235         public AMD64MultiStackMove(AllocatableValue[] results, Value[] inputs, Register scratch, AllocatableValue backupSlot) {
 236             super(TYPE);
 237             this.results = results;
 238             this.inputs = inputs;
 239             this.backupSlot = backupSlot;
 240             this.scratch = scratch;
 241         }
 242 
 243         @Override
 244         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
 245             AMD64Kind backupKind = (AMD64Kind) backupSlot.getPlatformKind();
 246             if (backupKind.isXMM()) {
 247                 // graal doesn't use vector values, so it's safe to backup using DOUBLE
 248                 backupKind = AMD64Kind.DOUBLE;
 249             }
 250 
 251             // backup scratch register
 252             move(backupKind, crb, masm, backupSlot, scratch.asValue(backupSlot.getValueKind()));
 253             for (int i = 0; i < results.length; i++) {
 254                 Value input = inputs[i];
 255                 AllocatableValue result = results[i];
 256                 // move stack slot
 257                 move((AMD64Kind) input.getPlatformKind(), crb, masm, scratch.asValue(input.getValueKind()), input);
 258                 move((AMD64Kind) result.getPlatformKind(), crb, masm, result, scratch.asValue(result.getValueKind()));
 259             }
 260             // restore scratch register
 261             move(backupKind, crb, masm, scratch.asValue(backupSlot.getValueKind()), backupSlot);
 262         }
 263     }
 264 
    /**
     * Stack-to-stack move implemented as a PUSH of the source address followed by a POP into
     * the destination address, avoiding the need for a scratch register (contrast with
     * {@link AMD64StackMove}).
     */
    @Opcode("STACKMOVE")
    public static final class AMD64PushPopStackMove extends AMD64LIRInstruction implements ValueMoveOp {
        public static final LIRInstructionClass<AMD64PushPopStackMove> TYPE = LIRInstructionClass.create(AMD64PushPopStackMove.class);

        @Def({STACK}) protected AllocatableValue result;
        @Use({STACK, HINT}) protected AllocatableValue input;
        // Operand size used for both the PUSH and the POP.
        private final OperandSize size;

        public AMD64PushPopStackMove(OperandSize size, AllocatableValue result, AllocatableValue input) {
            super(TYPE);
            this.result = result;
            this.input = input;
            this.size = size;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64MOp.PUSH.emit(masm, size, (AMD64Address) crb.asAddress(input));
            AMD64MOp.POP.emit(masm, size, (AMD64Address) crb.asAddress(result));
        }
    }
 296 
    /**
     * Computes the effective address of {@code address} into {@code result} via LEA, without
     * touching memory. The address operand may be UNINITIALIZED since only the address itself,
     * not the pointed-to value, is used.
     */
    public static final class LeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaOp> TYPE = LIRInstructionClass.create(LeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({COMPOSITE, UNINITIALIZED}) protected AMD64AddressValue address;

        public LeaOp(AllocatableValue result, AMD64AddressValue address) {
            super(TYPE);
            this.result = result;
            this.address = address;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result, AMD64Kind.QWORD), address.toAddress());
        }
    }
 314 
    /**
     * Loads the address of a constant placed in the code's data section into {@code result}.
     * The data reference is recorded so the final address is patched at code installation time.
     */
    public static final class LeaDataOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaDataOp> TYPE = LIRInstructionClass.create(LeaDataOp.class);

        @Def({REG}) protected AllocatableValue result;
        private final DataPointerConstant data;

        public LeaDataOp(AllocatableValue result, DataPointerConstant data) {
            super(TYPE);
            this.result = result;
            this.data = data;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result), (AMD64Address) crb.recordDataReferenceInCode(data));
        }
    }
 332 
    /**
     * Loads the address of a stack slot into {@code result} via LEA. The slot may be
     * UNINITIALIZED because only its address is taken, never its contents.
     */
    public static final class StackLeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<StackLeaOp> TYPE = LIRInstructionClass.create(StackLeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({STACK, UNINITIALIZED}) protected AllocatableValue slot;

        public StackLeaOp(AllocatableValue result, AllocatableValue slot) {
            super(TYPE);
            this.result = result;
            this.slot = slot;
            // Only real or virtual stack slots have a frame address to take.
            assert slot instanceof VirtualStackSlot || slot instanceof StackSlot;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result, AMD64Kind.QWORD), (AMD64Address) crb.asAddress(slot));
        }
    }
 351 
    /**
     * Emits a memory barrier. The {@code barriers} bit mask encodes which orderings are
     * required; interpretation of the mask is delegated to the assembler's
     * {@code membar} implementation.
     */
    public static final class MembarOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<MembarOp> TYPE = LIRInstructionClass.create(MembarOp.class);

        private final int barriers;

        public MembarOp(final int barriers) {
            super(TYPE);
            this.barriers = barriers;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.membar(barriers);
        }
    }
 367 
    /**
     * Implicit null check: emits a memory access to {@code address} that faults if the base is
     * null. The instruction position is recorded with the frame state so the runtime can map
     * the resulting trap back to a NullPointerException at this point.
     */
    public static final class NullCheckOp extends AMD64LIRInstruction implements NullCheck {
        public static final LIRInstructionClass<NullCheckOp> TYPE = LIRInstructionClass.create(NullCheckOp.class);

        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @State protected LIRFrameState state;

        public NullCheckOp(AMD64AddressValue address, LIRFrameState state) {
            super(TYPE);
            this.address = address;
            this.state = state;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // Record the exception edge *before* emitting the faulting access so the recorded
            // position is exactly the start of the potentially-trapping instruction.
            crb.recordImplicitException(masm.position(), state);
            masm.nullCheck(address.toAddress());
        }

        @Override
        public Value getCheckedValue() {
            return address.base;
        }

        @Override
        public LIRFrameState getState() {
            return state;
        }
    }
 396 
    /**
     * Atomic compare-and-swap via CMPXCHG. The compare value and the result are pinned to RAX
     * (asserted in {@link #emitCode}), matching the fixed-register semantics of the CMPXCHG
     * instruction: RAX holds the expected value on entry and the observed memory value on exit.
     */
    @Opcode("CAS")
    public static final class CompareAndSwapOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CompareAndSwapOp> TYPE = LIRInstructionClass.create(CompareAndSwapOp.class);

        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue cmpValue;
        @Use protected AllocatableValue newValue;

        public CompareAndSwapOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.cmpValue = cmpValue;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            assert asRegister(cmpValue).equals(AMD64.rax) && asRegister(result).equals(AMD64.rax);

            // The LOCK prefix is only required for atomicity on multiprocessor targets.
            if (crb.target.isMP) {
                masm.lock();
            }
            switch (accessKind) {
                case DWORD:
                    masm.cmpxchgl(asRegister(newValue), address.toAddress());
                    break;
                case QWORD:
                    masm.cmpxchgq(asRegister(newValue), address.toAddress());
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }
 436 
    /**
     * Atomic fetch-and-add via XADD: atomically adds {@code delta} to the memory location and
     * leaves the location's previous value in {@code result}. The address is {@code @Alive}
     * because it must remain valid past the initial move of delta into the result register.
     */
    @Opcode("ATOMIC_READ_AND_ADD")
    public static final class AtomicReadAndAddOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndAddOp> TYPE = LIRInstructionClass.create(AtomicReadAndAddOp.class);

        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue delta;

        public AtomicReadAndAddOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue delta) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.delta = delta;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // XADD exchanges its register operand with memory, so stage delta in the result
            // register first; after XADD it holds the memory's previous value.
            move(accessKind, crb, masm, result, delta);
            // The LOCK prefix is only required for atomicity on multiprocessor targets.
            if (crb.target.isMP) {
                masm.lock();
            }
            switch (accessKind) {
                case DWORD:
                    masm.xaddl(address.toAddress(), asRegister(result));
                    break;
                case QWORD:
                    masm.xaddq(address.toAddress(), asRegister(result));
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }
 473 
    /**
     * Atomic swap via XCHG: atomically stores {@code newValue} to the memory location and
     * leaves the location's previous value in {@code result}. No explicit LOCK prefix is
     * emitted — XCHG with a memory operand is implicitly locked on x86.
     */
    @Opcode("ATOMIC_READ_AND_WRITE")
    public static final class AtomicReadAndWriteOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndWriteOp> TYPE = LIRInstructionClass.create(AtomicReadAndWriteOp.class);

        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue newValue;

        public AtomicReadAndWriteOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // Stage the new value in the result register; XCHG then swaps it with memory,
            // leaving the old memory value in result.
            move(accessKind, crb, masm, result, newValue);
            switch (accessKind) {
                case DWORD:
                    masm.xchgl(asRegister(result), address.toAddress());
                    break;
                case QWORD:
                    masm.xchgq(asRegister(result), address.toAddress());
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }
 507 
    /**
     * Emits a move from {@code input} to {@code result}, deriving the move kind from the
     * result's platform kind.
     */
    public static void move(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        move((AMD64Kind) result.getPlatformKind(), crb, masm, result, input);
    }
 511 
    /**
     * Emits a move of the given kind, dispatching to the appropriate helper based on where
     * input and result live (register, stack slot, or Java constant). Unsupported
     * combinations — e.g. stack-to-stack or constant inputs that are not JavaConstants —
     * throw {@link GraalError}.
     */
    public static void move(AMD64Kind moveKind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        if (isRegister(input)) {
            if (isRegister(result)) {
                reg2reg(moveKind, masm, result, input);
            } else if (isStackSlot(result)) {
                reg2stack(moveKind, crb, masm, result, asRegister(input));
            } else {
                throw GraalError.shouldNotReachHere();
            }
        } else if (isStackSlot(input)) {
            if (isRegister(result)) {
                stack2reg(moveKind, crb, masm, asRegister(result), input);
            } else {
                // Stack-to-stack moves require a scratch register; see AMD64StackMove.
                throw GraalError.shouldNotReachHere();
            }
        } else if (isJavaConstant(input)) {
            if (isRegister(result)) {
                const2reg(crb, masm, asRegister(result), asJavaConstant(input));
            } else if (isStackSlot(result)) {
                const2stack(crb, masm, result, asJavaConstant(input));
            } else {
                throw GraalError.shouldNotReachHere();
            }
        } else {
            throw GraalError.shouldNotReachHere();
        }
    }
 539 
    /**
     * Emits a register-to-register move of the given kind; a no-op if source and destination
     * are the same register. Sub-DWORD kinds are moved with a full 32-bit movl.
     */
    private static void reg2reg(AMD64Kind kind, AMD64MacroAssembler masm, Value result, Value input) {
        if (asRegister(input).equals(asRegister(result))) {
            return;
        }
        switch (kind) {
            case BYTE:
            case WORD:
            case DWORD:
                masm.movl(asRegister(result), asRegister(input));
                break;
            case QWORD:
                masm.movq(asRegister(result), asRegister(input));
                break;
            case SINGLE:
                masm.movflt(asRegister(result, AMD64Kind.SINGLE), asRegister(input, AMD64Kind.SINGLE));
                break;
            case DOUBLE:
                masm.movdbl(asRegister(result, AMD64Kind.DOUBLE), asRegister(input, AMD64Kind.DOUBLE));
                break;
            default:
                throw GraalError.shouldNotReachHere("kind=" + kind);
        }
    }
 563 
    /**
     * Stores the contents of register {@code input} to the stack slot {@code result}, using a
     * store of exactly the given kind's width.
     */
    public static void reg2stack(AMD64Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Register input) {
        AMD64Address dest = (AMD64Address) crb.asAddress(result);
        switch (kind) {
            case BYTE:
                masm.movb(dest, input);
                break;
            case WORD:
                masm.movw(dest, input);
                break;
            case DWORD:
                masm.movl(dest, input);
                break;
            case QWORD:
                masm.movq(dest, input);
                break;
            case SINGLE:
                masm.movflt(dest, input);
                break;
            case DOUBLE:
                masm.movsd(dest, input);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
 589 
    /**
     * Loads the stack slot {@code input} into register {@code result}. Note the asymmetry with
     * {@link #reg2stack}: BYTE and WORD loads sign-extend to 32 bits (movsbl/movswl) rather
     * than performing a partial-register load.
     */
    public static void stack2reg(AMD64Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, Value input) {
        AMD64Address src = (AMD64Address) crb.asAddress(input);
        switch (kind) {
            case BYTE:
                masm.movsbl(result, src);
                break;
            case WORD:
                masm.movswl(result, src);
                break;
            case DWORD:
                masm.movl(result, src);
                break;
            case QWORD:
                masm.movq(result, src);
                break;
            case SINGLE:
                masm.movflt(result, src);
                break;
            case DOUBLE:
                masm.movdbl(result, src);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
 615 
 616     public static void const2reg(CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, JavaConstant input) {
 617         /*
 618          * Note: we use the kind of the input operand (and not the kind of the result operand)
 619          * because they don't match in all cases. For example, an object constant can be loaded to a
 620          * long register when unsafe casts occurred (e.g., for a write barrier where arithmetic
 621          * operations are then performed on the pointer).
 622          */
 623         switch (input.getJavaKind().getStackKind()) {
 624             case Int:
 625                 // Do not optimize with an XOR as this instruction may be between
 626                 // a CMP and a Jcc in which case the XOR will modify the condition
 627                 // flags and interfere with the Jcc.
 628                 masm.movl(result, input.asInt());
 629 
 630                 break;
 631             case Long:
 632                 // Do not optimize with an XOR as this instruction may be between
 633                 // a CMP and a Jcc in which case the XOR will modify the condition
 634                 // flags and interfere with the Jcc.
 635                 if (input.asLong() == (int) input.asLong()) {
 636                     // Sign extended to long
 637                     masm.movslq(result, (int) input.asLong());
 638                 } else if ((input.asLong() & 0xFFFFFFFFL) == input.asLong()) {
 639                     // Zero extended to long
 640                     masm.movl(result, (int) input.asLong());
 641                 } else {
 642                     masm.movq(result, input.asLong());
 643                 }
 644                 break;
 645             case Float:
 646                 // This is *not* the same as 'constant == 0.0f' in the case where constant is -0.0f
 647                 if (Float.floatToRawIntBits(input.asFloat()) == Float.floatToRawIntBits(0.0f)) {
 648                     masm.xorps(result, result);
 649                 } else {
 650                     masm.movflt(result, (AMD64Address) crb.asFloatConstRef(input));
 651                 }
 652                 break;
 653             case Double:
 654                 // This is *not* the same as 'constant == 0.0d' in the case where constant is -0.0d
 655                 if (Double.doubleToRawLongBits(input.asDouble()) == Double.doubleToRawLongBits(0.0d)) {
 656                     masm.xorpd(result, result);
 657                 } else {
 658                     masm.movdbl(result, (AMD64Address) crb.asDoubleConstRef(input));
 659                 }
 660                 break;
 661             case Object:
 662                 // Do not optimize with an XOR as this instruction may be between
 663                 // a CMP and a Jcc in which case the XOR will modify the condition
 664                 // flags and interfere with the Jcc.
 665                 if (input.isNull()) {
 666                     masm.movq(result, 0x0L);
 667                 } else if (crb.target.inlineObjects) {
 668                     crb.recordInlineDataInCode(input);
 669                     masm.movq(result, 0xDEADDEADDEADDEADL);
 670                 } else {
 671                     masm.movq(result, (AMD64Address) crb.recordDataReferenceInCode(input, 0));
 672                 }
 673                 break;
 674             default:
 675                 throw GraalError.shouldNotReachHere();
 676         }
 677     }
 678 
 679     public static boolean canMoveConst2Stack(JavaConstant input) {
 680         switch (input.getJavaKind().getStackKind()) {
 681             case Int:
 682                 break;
 683             case Long:
 684                 break;
 685             case Float:
 686                 break;
 687             case Double:
 688                 break;
 689             case Object:
 690                 if (input.isNull()) {
 691                     return true;
 692                 } else {
 693                     return false;
 694                 }
 695             default:
 696                 return false;
 697         }
 698         return true;
 699     }
 700 
    /**
     * Stores the given constant directly to the stack slot {@code result}. The constant is
     * first reduced to its raw bit pattern (floats/doubles via their raw bits, null as 0), then
     * stored with a width chosen by the result's platform kind. Callers must have checked
     * {@link #canMoveConst2Stack(JavaConstant)}: non-null object constants are rejected here.
     */
    public static void const2stack(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, JavaConstant input) {
        AMD64Address dest = (AMD64Address) crb.asAddress(result);
        // Raw bit pattern of the constant, independent of how it will be stored.
        final long imm;
        switch (input.getJavaKind().getStackKind()) {
            case Int:
                imm = input.asInt();
                break;
            case Long:
                imm = input.asLong();
                break;
            case Float:
                imm = floatToRawIntBits(input.asFloat());
                break;
            case Double:
                imm = doubleToRawLongBits(input.asDouble());
                break;
            case Object:
                if (input.isNull()) {
                    imm = 0;
                } else {
                    throw GraalError.shouldNotReachHere("Non-null object constants must be in register");
                }
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }

        // Emit a store whose width matches the slot's platform kind.
        switch ((AMD64Kind) result.getPlatformKind()) {
            case BYTE:
                assert NumUtil.isByte(imm) : "Is not in byte range: " + imm;
                AMD64MIOp.MOVB.emit(masm, OperandSize.BYTE, dest, (int) imm);
                break;
            case WORD:
                assert NumUtil.isShort(imm) : "Is not in short range: " + imm;
                AMD64MIOp.MOV.emit(masm, OperandSize.WORD, dest, (int) imm);
                break;
            case DWORD:
            case SINGLE:
                assert NumUtil.isInt(imm) : "Is not in int range: " + imm;
                masm.movl(dest, (int) imm);
                break;
            case QWORD:
            case DOUBLE:
                masm.movlong(dest, imm);
                break;
            default:
                throw GraalError.shouldNotReachHere("Unknown result Kind: " + result.getPlatformKind());
        }
    }
 750 
    /**
     * Common base for the pointer compression/decompression instructions
     * ({@link CompressPointer}, {@link UncompressPointer}). Holds the compression
     * {@link CompressEncoding encoding} (base + shift), the nullability of the input, and the
     * operands shared by both directions.
     */
    public abstract static class Pointer extends AMD64LIRInstruction {
        protected final LIRKindTool lirKindTool;
        protected final CompressEncoding encoding;
        // True when the input is statically known to be non-null, allowing the
        // subclasses to skip their null-check paths.
        protected final boolean nonNull;

        @Def({REG, HINT}) private AllocatableValue result;
        @Use({REG}) private AllocatableValue input;
        // ILLEGAL when the encoding has no base register (see hasBase).
        @Alive({REG, ILLEGAL}) private AllocatableValue baseRegister;

        protected Pointer(LIRInstructionClass<? extends Pointer> type, AllocatableValue result, AllocatableValue input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull,
                        LIRKindTool lirKindTool) {
            super(type);
            this.result = result;
            this.input = input;
            this.baseRegister = baseRegister;
            this.encoding = encoding;
            this.nonNull = nonNull;
            this.lirKindTool = lirKindTool;
        }

        /**
         * A base register must be applied either when the encoding defines one or when
         * position-independent code is being generated (the base is then not a compile-time
         * constant).
         */
        protected boolean hasBase(CompilationResultBuilder crb) {
            return GeneratePIC.getValue(crb.getOptions()) || encoding.hasBase();
        }

        protected final Register getResultRegister() {
            return asRegister(result);
        }

        protected final Register getBaseRegister() {
            return asRegister(baseRegister);
        }

        protected final int getShift() {
            return encoding.getShift();
        }

        /** Copies {@code input} into {@code result} with the given kind's platform kind. */
        protected final void move(LIRKind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64Move.move((AMD64Kind) kind.getPlatformKind(), crb, masm, result, input);
        }
    }
 791 
 792     public static final class CompressPointer extends Pointer {
 793         public static final LIRInstructionClass<CompressPointer> TYPE = LIRInstructionClass.create(CompressPointer.class);
 794 
 795         public CompressPointer(AllocatableValue result, AllocatableValue input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
 796             super(TYPE, result, input, baseRegister, encoding, nonNull, lirKindTool);
 797         }
 798 
 799         @Override
 800         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
 801             move(lirKindTool.getObjectKind(), crb, masm);
 802 
 803             Register resReg = getResultRegister();
 804             if (hasBase(crb)) {
 805                 Register baseReg = getBaseRegister();
 806                 if (!nonNull) {
 807                     masm.testq(resReg, resReg);
 808                     masm.cmovq(Equal, resReg, baseReg);
 809                 }
 810                 masm.subq(resReg, baseReg);
 811             }
 812 
 813             int shift = getShift();
 814             if (shift != 0) {
 815                 masm.shrq(resReg, shift);
 816             }
 817         }
 818     }
 819 
 820     public static final class UncompressPointer extends Pointer {
 821         public static final LIRInstructionClass<UncompressPointer> TYPE = LIRInstructionClass.create(UncompressPointer.class);
 822 
 823         public UncompressPointer(AllocatableValue result, AllocatableValue input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
 824             super(TYPE, result, input, baseRegister, encoding, nonNull, lirKindTool);
 825         }
 826 
 827         @Override
 828         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
 829             move(lirKindTool.getNarrowOopKind(), crb, masm);
 830 
 831             Register resReg = getResultRegister();
 832             int shift = getShift();
 833             if (shift != 0) {
 834                 masm.shlq(resReg, shift);
 835             }
 836 
 837             if (hasBase(crb)) {
 838                 Register baseReg = getBaseRegister();
 839                 if (nonNull) {
 840                     masm.addq(resReg, baseReg);
 841                     return;
 842                 }
 843 
 844                 if (shift == 0) {
 845                     // if encoding.shift != 0, the flags are already set by the shlq
 846                     masm.testq(resReg, resReg);
 847                 }
 848 
 849                 Label done = new Label();
 850                 masm.jccb(Equal, done);
 851                 masm.addq(resReg, baseReg);
 852                 masm.bind(done);
 853             }
 854         }
 855     }
 856 }