/*
 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package org.graalvm.compiler.lir.amd64;

import static java.lang.Double.doubleToRawLongBits;
import static java.lang.Float.floatToRawIntBits;
import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static jdk.vm.ci.code.ValueUtil.isStackSlot;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag.Equal;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag.NotEqual;
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;

import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp;
import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.core.common.CompressEncoding;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.spi.LIRKindTool;
import org.graalvm.compiler.core.common.type.DataPointerConstant;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.StandardOp.LoadConstantOp;
import org.graalvm.compiler.lir.StandardOp.NullCheck;
import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
import org.graalvm.compiler.lir.VirtualStackSlot;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import org.graalvm.compiler.options.OptionValues;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.Value;

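/**
 * AMD64-specific LIR instructions for moves between registers, stack slots and constants, plus
 * address computation (LEA), memory barriers, implicit null checks, atomic operations (CAS, XADD,
 * XCHG) and compressed-pointer conversions.
 */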
public class AMD64Move {

    private abstract static class AbstractMoveOp extends AMD64LIRInstruction implements ValueMoveOp {
        public static final LIRInstructionClass<AbstractMoveOp> TYPE = LIRInstructionClass.create(AbstractMoveOp.class);

        private AMD64Kind moveKind;

        protected AbstractMoveOp(LIRInstructionClass<? extends AbstractMoveOp> c, AMD64Kind moveKind) {
            super(c);
            this.moveKind = moveKind;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            move(moveKind, crb, masm, getResult(), getInput());
        }
    }

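    /**
     * Moves a value from a register or stack slot into a register. The {@code HINT} on the result
     * tells the register allocator that reusing the input's register makes the move a no-op.
     */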
    @Opcode("MOVE")
    public static final class MoveToRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveToRegOp> TYPE = LIRInstructionClass.create(MoveToRegOp.class);

        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG, STACK}) protected AllocatableValue input;

        public MoveToRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }

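    /**
     * Moves a value from a register into a register or stack slot.
     */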
    @Opcode("MOVE")
    public static final class MoveFromRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveFromRegOp> TYPE = LIRInstructionClass.create(MoveFromRegOp.class);

        @Def({REG, STACK}) protected AllocatableValue result;
        @Use({REG, HINT}) protected AllocatableValue input;

        public MoveFromRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }

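    /**
     * Loads a {@link JavaConstant} into a register or stack slot.
     */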
    @Opcode("MOVE")
    public static class MoveFromConstOp extends AMD64LIRInstruction implements LoadConstantOp {
        public static final LIRInstructionClass<MoveFromConstOp> TYPE = LIRInstructionClass.create(MoveFromConstOp.class);

        @Def({REG, STACK}) protected AllocatableValue result;
        private final JavaConstant input;

        public MoveFromConstOp(AllocatableValue result, JavaConstant input) {
            super(TYPE);
            this.result = result;
            this.input = input;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            if (isRegister(result)) {
                const2reg(crb, masm, asRegister(result), input);
            } else {
                assert isStackSlot(result);
                const2stack(crb, masm, result, input);
            }
        }

        @Override
        public Constant getConstant() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }

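    /**
     * Moves a value between two stack slots. AMD64 has no memory-to-memory move, so the value is
     * staged through a scratch register; the scratch register's current contents are saved to
     * {@code backupSlot} and restored afterwards.
     */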
    @Opcode("STACKMOVE")
    public static final class AMD64StackMove extends AMD64LIRInstruction implements ValueMoveOp {
        public static final LIRInstructionClass<AMD64StackMove> TYPE = LIRInstructionClass.create(AMD64StackMove.class);

        @Def({STACK}) protected AllocatableValue result;
        @Use({STACK, HINT}) protected AllocatableValue input;
        @Alive({STACK, UNINITIALIZED}) private AllocatableValue backupSlot;

        private Register scratch;

        public AMD64StackMove(AllocatableValue result, AllocatableValue input, Register scratch, AllocatableValue backupSlot) {
            super(TYPE);
            this.result = result;
            this.input = input;
            this.backupSlot = backupSlot;
            this.scratch = scratch;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }

        public Register getScratchRegister() {
            return scratch;
        }

        public AllocatableValue getBackupSlot() {
            return backupSlot;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64Kind backupKind = (AMD64Kind) backupSlot.getPlatformKind();
            if (backupKind.isXMM()) {
                // Graal does not use vector values, so it is safe to back up using a DOUBLE move
                backupKind = AMD64Kind.DOUBLE;
            }

            // back up the scratch register
            reg2stack(backupKind, crb, masm, backupSlot, scratch);
            // move the stack slot through the scratch register
            stack2reg((AMD64Kind) getInput().getPlatformKind(), crb, masm, scratch, getInput());
            reg2stack((AMD64Kind) getResult().getPlatformKind(), crb, masm, getResult(), scratch);
            // restore the scratch register
            stack2reg(backupKind, crb, masm, scratch, backupSlot);
        }
    }

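    /**
     * Performs a whole sequence of stack-to-stack moves through a single scratch register, backing
     * the register up and restoring it only once for the entire group.
     */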
    @Opcode("MULTISTACKMOVE")
    public static final class AMD64MultiStackMove extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AMD64MultiStackMove> TYPE = LIRInstructionClass.create(AMD64MultiStackMove.class);

        @Def({STACK}) protected AllocatableValue[] results;
        @Use({STACK}) protected Value[] inputs;
        @Alive({STACK, UNINITIALIZED}) private AllocatableValue backupSlot;

        private Register scratch;

        public AMD64MultiStackMove(AllocatableValue[] results, Value[] inputs, Register scratch, AllocatableValue backupSlot) {
            super(TYPE);
            this.results = results;
            this.inputs = inputs;
            this.backupSlot = backupSlot;
            this.scratch = scratch;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64Kind backupKind = (AMD64Kind) backupSlot.getPlatformKind();
            if (backupKind.isXMM()) {
                // Graal does not use vector values, so it is safe to back up using a DOUBLE move
                backupKind = AMD64Kind.DOUBLE;
            }

            // back up the scratch register
            move(backupKind, crb, masm, backupSlot, scratch.asValue(backupSlot.getValueKind()));
            for (int i = 0; i < results.length; i++) {
                Value input = inputs[i];
                AllocatableValue result = results[i];
                // move the stack slot through the scratch register
                move((AMD64Kind) input.getPlatformKind(), crb, masm, scratch.asValue(input.getValueKind()), input);
                move((AMD64Kind) result.getPlatformKind(), crb, masm, result, scratch.asValue(result.getValueKind()));
            }
            // restore the scratch register
            move(backupKind, crb, masm, scratch.asValue(backupSlot.getValueKind()), backupSlot);
        }
    }

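    /**
     * Moves a value between two stack slots using a PUSH/POP pair, which avoids the need for a
     * scratch register.
     */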
    @Opcode("STACKMOVE")
    public static final class AMD64PushPopStackMove extends AMD64LIRInstruction implements ValueMoveOp {
        public static final LIRInstructionClass<AMD64PushPopStackMove> TYPE = LIRInstructionClass.create(AMD64PushPopStackMove.class);

        @Def({STACK}) protected AllocatableValue result;
        @Use({STACK, HINT}) protected AllocatableValue input;
        private final OperandSize size;

        public AMD64PushPopStackMove(OperandSize size, AllocatableValue result, AllocatableValue input) {
            super(TYPE);
            this.result = result;
            this.input = input;
            this.size = size;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64MOp.PUSH.emit(masm, size, (AMD64Address) crb.asAddress(input));
            AMD64MOp.POP.emit(masm, size, (AMD64Address) crb.asAddress(result));
        }
    }

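    /**
     * Computes the effective address of {@code address} into a register via LEA, without accessing
     * memory.
     */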
    public static final class LeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaOp> TYPE = LIRInstructionClass.create(LeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({COMPOSITE, UNINITIALIZED}) protected AMD64AddressValue address;
        private final OperandSize size;

        public LeaOp(AllocatableValue result, AMD64AddressValue address, OperandSize size) {
            super(TYPE);
            this.result = result;
            this.address = address;
            this.size = size;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            if (size == OperandSize.QWORD) {
                masm.leaq(asRegister(result, AMD64Kind.QWORD), address.toAddress());
            } else {
                assert size == OperandSize.DWORD;
                masm.lead(asRegister(result, AMD64Kind.DWORD), address.toAddress());
            }
        }
    }

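    /**
     * Loads the address of a constant that is emitted into the data section of the compiled code.
     */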
    public static final class LeaDataOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaDataOp> TYPE = LIRInstructionClass.create(LeaDataOp.class);

        @Def({REG}) protected AllocatableValue result;
        private final DataPointerConstant data;

        public LeaDataOp(AllocatableValue result, DataPointerConstant data) {
            super(TYPE);
            this.result = result;
            this.data = data;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result), (AMD64Address) crb.recordDataReferenceInCode(data));
        }
    }

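    /**
     * Computes the absolute address of a stack slot into a register.
     */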
    public static final class StackLeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<StackLeaOp> TYPE = LIRInstructionClass.create(StackLeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({STACK, UNINITIALIZED}) protected AllocatableValue slot;

        public StackLeaOp(AllocatableValue result, AllocatableValue slot) {
            super(TYPE);
            this.result = result;
            this.slot = slot;
            assert slot instanceof VirtualStackSlot || slot instanceof StackSlot;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result, AMD64Kind.QWORD), (AMD64Address) crb.asAddress(slot));
        }
    }

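    /**
     * Emits a memory barrier; {@code barriers} is a mask of the required barrier kinds (see
     * {@code jdk.vm.ci.code.MemoryBarriers}).
     */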
    public static final class MembarOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<MembarOp> TYPE = LIRInstructionClass.create(MembarOp.class);

        private final int barriers;

        public MembarOp(final int barriers) {
            super(TYPE);
            this.barriers = barriers;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.membar(barriers);
        }
    }

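    /**
     * Implicit null check: performs a memory access that faults if the address is null, and
     * records the code position together with the frame state so the runtime can attribute the
     * fault to this instruction.
     */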
    public static final class NullCheckOp extends AMD64LIRInstruction implements NullCheck {
        public static final LIRInstructionClass<NullCheckOp> TYPE = LIRInstructionClass.create(NullCheckOp.class);

        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @State protected LIRFrameState state;

        public NullCheckOp(AMD64AddressValue address, LIRFrameState state) {
            super(TYPE);
            this.address = address;
            this.state = state;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            crb.recordImplicitException(masm.position(), state);
            masm.nullCheck(address.toAddress());
        }

        @Override
        public Value getCheckedValue() {
            return address.base;
        }

        @Override
        public LIRFrameState getState() {
            return state;
        }
    }

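    /**
     * Atomic compare-and-swap via {@code LOCK CMPXCHG}. Per the x86 semantics of CMPXCHG, the
     * compare value and the result are pinned to {@code rax}; the result receives the value found
     * in memory.
     */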
    @Opcode("CAS")
    public static final class CompareAndSwapOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CompareAndSwapOp> TYPE = LIRInstructionClass.create(CompareAndSwapOp.class);

        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue cmpValue;
        @Use protected AllocatableValue newValue;

        public CompareAndSwapOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.cmpValue = cmpValue;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            assert asRegister(cmpValue).equals(AMD64.rax) && asRegister(result).equals(AMD64.rax);

            if (crb.target.isMP) {
                masm.lock();
            }
            switch (accessKind) {
                case BYTE:
                    masm.cmpxchgb(asRegister(newValue), address.toAddress());
                    break;
                case WORD:
                    masm.cmpxchgw(asRegister(newValue), address.toAddress());
                    break;
                case DWORD:
                    masm.cmpxchgl(asRegister(newValue), address.toAddress());
                    break;
                case QWORD:
                    masm.cmpxchgq(asRegister(newValue), address.toAddress());
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }

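    /**
     * Atomic fetch-and-add via {@code LOCK XADD}: the result receives the previous memory value
     * and the memory location is incremented by {@code delta}. The address is {@code @Alive} so
     * that it cannot share registers with the result, which is written first.
     */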
    @Opcode("ATOMIC_READ_AND_ADD")
    public static final class AtomicReadAndAddOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndAddOp> TYPE = LIRInstructionClass.create(AtomicReadAndAddOp.class);

        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue delta;

        public AtomicReadAndAddOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue delta) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.delta = delta;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            move(accessKind, crb, masm, result, delta);
            if (crb.target.isMP) {
                masm.lock();
            }
            switch (accessKind) {
                case BYTE:
                    masm.xaddb(address.toAddress(), asRegister(result));
                    break;
                case WORD:
                    masm.xaddw(address.toAddress(), asRegister(result));
                    break;
                case DWORD:
                    masm.xaddl(address.toAddress(), asRegister(result));
                    break;
                case QWORD:
                    masm.xaddq(address.toAddress(), asRegister(result));
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }

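    /**
     * Atomic exchange via {@code XCHG}: the result receives the previous memory value and the
     * memory location is set to {@code newValue}. No LOCK prefix is required because XCHG with a
     * memory operand is implicitly locked.
     */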
    @Opcode("ATOMIC_READ_AND_WRITE")
    public static final class AtomicReadAndWriteOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndWriteOp> TYPE = LIRInstructionClass.create(AtomicReadAndWriteOp.class);

        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue newValue;

        public AtomicReadAndWriteOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            move(accessKind, crb, masm, result, newValue);
            switch (accessKind) {
                case BYTE:
                    masm.xchgb(asRegister(result), address.toAddress());
                    break;
                case WORD:
                    masm.xchgw(asRegister(result), address.toAddress());
                    break;
                case DWORD:
                    masm.xchgl(asRegister(result), address.toAddress());
                    break;
                case QWORD:
                    masm.xchgq(asRegister(result), address.toAddress());
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }

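    /**
     * Emits a move after dispatching on the operand categories of {@code input} and
     * {@code result}. Stack-to-stack moves are not handled here: they need a scratch register and
     * are implemented by {@link AMD64StackMove}.
     */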
    public static void move(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        move((AMD64Kind) result.getPlatformKind(), crb, masm, result, input);
    }

    public static void move(AMD64Kind moveKind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        if (isRegister(input)) {
            if (isRegister(result)) {
                reg2reg(moveKind, masm, result, input);
            } else if (isStackSlot(result)) {
                reg2stack(moveKind, crb, masm, result, asRegister(input));
            } else {
                throw GraalError.shouldNotReachHere();
            }
        } else if (isStackSlot(input)) {
            if (isRegister(result)) {
                stack2reg(moveKind, crb, masm, asRegister(result), input);
            } else {
                throw GraalError.shouldNotReachHere();
            }
        } else if (isJavaConstant(input)) {
            if (isRegister(result)) {
                const2reg(crb, masm, asRegister(result), asJavaConstant(input));
            } else if (isStackSlot(result)) {
                const2stack(crb, masm, result, asJavaConstant(input));
            } else {
                throw GraalError.shouldNotReachHere();
            }
        } else {
            throw GraalError.shouldNotReachHere();
        }
    }

    private static void reg2reg(AMD64Kind kind, AMD64MacroAssembler masm, Value result, Value input) {
        if (asRegister(input).equals(asRegister(result))) {
            return;
        }
        assert asRegister(result).getRegisterCategory().equals(asRegister(input).getRegisterCategory());
        switch (kind) {
            case BYTE:
            case WORD:
            case DWORD:
                masm.movl(asRegister(result), asRegister(input));
                break;
            case QWORD:
                masm.movq(asRegister(result), asRegister(input));
                break;
            case SINGLE:
                masm.movflt(asRegister(result, AMD64Kind.SINGLE), asRegister(input, AMD64Kind.SINGLE));
                break;
            case DOUBLE:
                masm.movdbl(asRegister(result, AMD64Kind.DOUBLE), asRegister(input, AMD64Kind.DOUBLE));
                break;
            default:
                throw GraalError.shouldNotReachHere("kind=" + kind);
        }
    }

    public static void reg2stack(AMD64Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Register input) {
        AMD64Address dest = (AMD64Address) crb.asAddress(result);
        switch (kind) {
            case BYTE:
                masm.movb(dest, input);
                break;
            case WORD:
                masm.movw(dest, input);
                break;
            case DWORD:
                masm.movl(dest, input);
                break;
            case QWORD:
                masm.movq(dest, input);
                break;
            case SINGLE:
                masm.movflt(dest, input);
                break;
            case DOUBLE:
                masm.movsd(dest, input);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

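    /**
     * Loads a stack slot into a register. Note that BYTE and WORD loads sign-extend to 32 bits
     * (movsbl/movswl).
     */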
    public static void stack2reg(AMD64Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, Value input) {
        AMD64Address src = (AMD64Address) crb.asAddress(input);
        switch (kind) {
            case BYTE:
                masm.movsbl(result, src);
                break;
            case WORD:
                masm.movswl(result, src);
                break;
            case DWORD:
                masm.movl(result, src);
                break;
            case QWORD:
                masm.movq(result, src);
                break;
            case SINGLE:
                masm.movflt(result, src);
                break;
            case DOUBLE:
                masm.movdbl(result, src);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    public static void const2reg(CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, JavaConstant input) {
        /*
         * Note: we use the kind of the input operand (and not the kind of the result operand)
         * because they don't match in all cases. For example, an object constant can be loaded to a
         * long register when unsafe casts occurred (e.g., for a write barrier where arithmetic
         * operations are then performed on the pointer).
         */
        switch (input.getJavaKind().getStackKind()) {
            case Int:
                // Do not optimize with an XOR as this instruction may be between
                // a CMP and a Jcc in which case the XOR will modify the condition
                // flags and interfere with the Jcc.
                masm.movl(result, input.asInt());
                break;
            case Long:
                // Do not optimize with an XOR as this instruction may be between
                // a CMP and a Jcc in which case the XOR will modify the condition
                // flags and interfere with the Jcc.
                if (input.asLong() == (int) input.asLong()) {
                    // Sign extended to long
                    masm.movslq(result, (int) input.asLong());
                } else if ((input.asLong() & 0xFFFFFFFFL) == input.asLong()) {
                    // Zero extended to long
                    masm.movl(result, (int) input.asLong());
                } else {
                    masm.movq(result, input.asLong());
                }
                break;
            case Float:
                // This is *not* the same as 'constant == 0.0f' in the case where constant is -0.0f
                if (Float.floatToRawIntBits(input.asFloat()) == Float.floatToRawIntBits(0.0f)) {
                    masm.xorps(result, result);
                } else {
                    masm.movflt(result, (AMD64Address) crb.asFloatConstRef(input));
                }
                break;
            case Double:
                // This is *not* the same as 'constant == 0.0d' in the case where constant is -0.0d
                if (Double.doubleToRawLongBits(input.asDouble()) == Double.doubleToRawLongBits(0.0d)) {
                    masm.xorpd(result, result);
                } else {
                    masm.movdbl(result, (AMD64Address) crb.asDoubleConstRef(input));
                }
                break;
            case Object:
                // Do not optimize with an XOR as this instruction may be between
                // a CMP and a Jcc in which case the XOR will modify the condition
                // flags and interfere with the Jcc.
                if (input.isNull()) {
                    if (crb.mustReplaceWithNullRegister(input)) {
                        masm.movq(result, crb.nullRegister);
                    } else {
                        masm.movslq(result, 0);
                    }
                } else if (crb.target.inlineObjects) {
                    crb.recordInlineDataInCode(input);
                    // placeholder immediate, patched with the actual object address via the data
                    // reference recorded above
                    masm.movq(result, 0xDEADDEADDEADDEADL, true);
                } else {
                    masm.movq(result, (AMD64Address) crb.recordDataReferenceInCode(input, 0));
                }
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

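    /**
     * Returns whether {@code input} can be materialized directly into a stack slot by
     * {@link #const2stack}. All primitive stack kinds qualify; of object constants only null does,
     * since non-null object constants must go through a register.
     */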
    public static boolean canMoveConst2Stack(JavaConstant input) {
        switch (input.getJavaKind().getStackKind()) {
            case Int:
            case Long:
            case Float:
            case Double:
                return true;
            case Object:
                return input.isNull();
            default:
                return false;
        }
    }

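    /**
     * Stores a constant directly into a stack slot: the constant is first reduced to its raw
     * immediate bits, which are then written with a store matching the width of the slot's
     * platform kind.
     */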
    public static void const2stack(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, JavaConstant input) {
        AMD64Address dest = (AMD64Address) crb.asAddress(result);
        final long imm;
        switch (input.getJavaKind().getStackKind()) {
            case Int:
                imm = input.asInt();
                break;
            case Long:
                imm = input.asLong();
                break;
            case Float:
                imm = floatToRawIntBits(input.asFloat());
                break;
            case Double:
                imm = doubleToRawLongBits(input.asDouble());
                break;
            case Object:
                if (input.isNull()) {
                    if (crb.mustReplaceWithNullRegister(input)) {
                        masm.movq(dest, crb.nullRegister);
                        return;
                    }
                    imm = 0;
                } else {
                    throw GraalError.shouldNotReachHere("Non-null object constants must be in a register");
                }
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }

        switch ((AMD64Kind) result.getPlatformKind()) {
            case BYTE:
                assert NumUtil.isByte(imm) : "Is not in byte range: " + imm;
                AMD64MIOp.MOVB.emit(masm, OperandSize.BYTE, dest, (int) imm);
                break;
            case WORD:
                assert NumUtil.isShort(imm) : "Is not in short range: " + imm;
                AMD64MIOp.MOV.emit(masm, OperandSize.WORD, dest, (int) imm);
                break;
            case DWORD:
            case SINGLE:
                assert NumUtil.isInt(imm) : "Is not in int range: " + imm;
                masm.movl(dest, (int) imm);
                break;
            case QWORD:
            case DOUBLE:
                masm.movlong(dest, imm);
                break;
            default:
                throw GraalError.shouldNotReachHere("Unknown result Kind: " + result.getPlatformKind());
        }
    }

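    /**
     * Common base class for compressing and uncompressing pointers (e.g., compressed oops): the
     * {@link CompressEncoding} supplies the base and shift of the encoding.
     */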
    public abstract static class PointerCompressionOp extends AMD64LIRInstruction {
        protected final LIRKindTool lirKindTool;
        protected final CompressEncoding encoding;
        protected final boolean nonNull;

        @Def({REG, HINT}) private AllocatableValue result;
        @Use({REG, CONST}) private Value input;
        @Alive({REG, ILLEGAL, UNINITIALIZED}) private AllocatableValue baseRegister;

        protected PointerCompressionOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
                        AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {

            super(type);
            this.result = result;
            this.input = input;
            this.baseRegister = baseRegister;
            this.encoding = encoding;
            this.nonNull = nonNull;
            this.lirKindTool = lirKindTool;
        }

        public static boolean hasBase(OptionValues options, CompressEncoding encoding) {
            return GeneratePIC.getValue(options) || encoding.hasBase();
        }

        public final Value getInput() {
            return input;
        }

        public final AllocatableValue getResult() {
            return result;
        }

        protected final Register getResultRegister() {
            return asRegister(result);
        }

        protected final Register getBaseRegister(CompilationResultBuilder crb) {
            return hasBase(crb.getOptions(), encoding) ? asRegister(baseRegister) : Register.None;
        }

        protected final int getShift() {
            return encoding.getShift();
        }

        protected final void move(LIRKind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64Move.move((AMD64Kind) kind.getPlatformKind(), crb, masm, result, input);
        }
    }

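    /**
     * Compresses a pointer by subtracting the base (if any) and shifting right. For inputs that
     * may be null, null is first replaced by the base register via {@code cmov} so that it
     * compresses to 0.
     */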
    public static class CompressPointerOp extends PointerCompressionOp {
        public static final LIRInstructionClass<CompressPointerOp> TYPE = LIRInstructionClass.create(CompressPointerOp.class);

        public CompressPointerOp(AllocatableValue result, Value input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
            this(TYPE, result, input, baseRegister, encoding, nonNull, lirKindTool);
        }

        private CompressPointerOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
                        AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {

            super(type, result, input, baseRegister, encoding, nonNull, lirKindTool);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            move(lirKindTool.getObjectKind(), crb, masm);

            final Register resReg = getResultRegister();
            final Register baseReg = getBaseRegister(crb);
            if (!baseReg.equals(Register.None)) {
                if (!nonNull) {
                    masm.testq(resReg, resReg);
                    masm.cmovq(Equal, resReg, baseReg);
                }
                masm.subq(resReg, baseReg);
            }

            int shift = getShift();
            if (shift != 0) {
                masm.shrq(resReg, shift);
            }
        }
    }

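    /**
     * Uncompresses a pointer by shifting left and adding the base (if any). When the input is
     * known non-null and resides in a register, the whole conversion is folded into a single
     * {@code leaq result, [base + input << shift]}.
     */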
    public static class UncompressPointerOp extends PointerCompressionOp {
        public static final LIRInstructionClass<UncompressPointerOp> TYPE = LIRInstructionClass.create(UncompressPointerOp.class);

        public UncompressPointerOp(AllocatableValue result, Value input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
            this(TYPE, result, input, baseRegister, encoding, nonNull, lirKindTool);
        }

        private UncompressPointerOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
                        AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
            super(type, result, input, baseRegister, encoding, nonNull, lirKindTool);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register baseReg = getBaseRegister(crb);
            if (nonNull && !baseReg.equals(Register.None) && getInput() instanceof RegisterValue) {
                Register inputReg = ((RegisterValue) getInput()).getRegister();
                if (!inputReg.equals(getResultRegister())) {
                    masm.leaq(getResultRegister(), new AMD64Address(baseReg, inputReg, AMD64Address.Scale.fromShift(getShift())));
                    return;
                }
            }
            move(lirKindTool.getNarrowOopKind(), crb, masm);
            emitUncompressCode(masm, getResultRegister(), getShift(), baseReg, nonNull);
        }

        public static void emitUncompressCode(AMD64MacroAssembler masm, Register resReg, int shift, Register baseReg, boolean nonNull) {
            if (nonNull) {
                if (!baseReg.equals(Register.None)) {
                    if (shift != 0) {
                        masm.leaq(resReg, new AMD64Address(baseReg, resReg, AMD64Address.Scale.fromShift(shift)));
                    } else {
                        masm.addq(resReg, baseReg);
                    }
                } else if (shift != 0) {
                    masm.shlq(resReg, shift);
                }
            } else {
                if (shift != 0) {
                    masm.shlq(resReg, shift);
                }

                if (!baseReg.equals(Register.None)) {
                    if (shift == 0) {
                        // if encoding.shift != 0, the flags are already set by the shlq
                        masm.testq(resReg, resReg);
                    }

                    Label done = new Label();
                    masm.jccb(Equal, done);
                    masm.addq(resReg, baseReg);
                    masm.bind(done);
                }
            }
        }
    }

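    /**
     * Base class for conversions between the two encodings of null that exist when a dedicated
     * null register is in use: the literal 0 and the contents of {@code crb.nullRegister}. If no
     * null register is configured, no code is emitted.
     */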
    private abstract static class ZeroNullConversionOp extends AMD64LIRInstruction {
        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue input;

        protected ZeroNullConversionOp(LIRInstructionClass<? extends ZeroNullConversionOp> type, AllocatableValue result, AllocatableValue input) {
            super(type);
            this.result = result;
            this.input = input;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register nullRegister = crb.nullRegister;
            if (!nullRegister.equals(Register.None)) {
                emitConversion(asRegister(result), asRegister(input), nullRegister, masm);
            }
        }

        protected abstract void emitConversion(Register resultRegister, Register inputRegister, Register nullRegister, AMD64MacroAssembler masm);
    }

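    /**
     * Rewrites the null-register encoding of null to 0. The in-place variant subtracts the null
     * register: the difference is 0 exactly when the input was null, and otherwise the subtraction
     * is undone by adding the null register back.
     */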
    public static class ConvertNullToZeroOp extends ZeroNullConversionOp {
        public static final LIRInstructionClass<ConvertNullToZeroOp> TYPE = LIRInstructionClass.create(ConvertNullToZeroOp.class);

        public ConvertNullToZeroOp(AllocatableValue result, AllocatableValue input) {
            super(TYPE, result, input);
        }

        @Override
        protected final void emitConversion(Register resultRegister, Register inputRegister, Register nullRegister, AMD64MacroAssembler masm) {
            if (inputRegister.equals(resultRegister)) {
                masm.subq(inputRegister, nullRegister);
                Label done = new Label();
                masm.jccb(Equal, done);
                masm.addq(inputRegister, nullRegister);
                masm.bind(done);
            } else {
                masm.subq(resultRegister, resultRegister);
                masm.cmpq(inputRegister, nullRegister);
                masm.cmovq(NotEqual, resultRegister, inputRegister);
            }
        }
    }

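    /**
     * Rewrites 0 to the null-register encoding of null, using a test and a conditional move.
     */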
    public static class ConvertZeroToNullOp extends ZeroNullConversionOp {
        public static final LIRInstructionClass<ConvertZeroToNullOp> TYPE = LIRInstructionClass.create(ConvertZeroToNullOp.class);

        public ConvertZeroToNullOp(AllocatableValue result, AllocatableValue input) {
            super(TYPE, result, input);
        }

        @Override
        protected final void emitConversion(Register resultRegister, Register inputRegister, Register nullRegister, AMD64MacroAssembler masm) {
            if (!inputRegister.equals(resultRegister)) {
                masm.movq(resultRegister, inputRegister);
            }
            masm.testq(inputRegister, inputRegister);
            masm.cmovq(Equal, resultRegister, nullRegister);
        }
    }
}