1 /*
   2  * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 
  25 package org.graalvm.compiler.lir.amd64;
  26 
  27 import static java.lang.Double.doubleToRawLongBits;
  28 import static java.lang.Float.floatToRawIntBits;
  29 import static jdk.vm.ci.code.ValueUtil.asRegister;
  30 import static jdk.vm.ci.code.ValueUtil.isRegister;
  31 import static jdk.vm.ci.code.ValueUtil.isStackSlot;
  32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag.Equal;
  33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag.NotEqual;
  34 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
  35 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
  36 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
  37 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
  38 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
  39 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
  40 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
  41 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
  42 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
  43 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
  44 
  45 import org.graalvm.compiler.asm.Label;
  46 import org.graalvm.compiler.asm.amd64.AMD64Address;
  47 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
  48 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp;
  49 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
  50 import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
  51 import org.graalvm.compiler.core.common.CompressEncoding;
  52 import org.graalvm.compiler.core.common.LIRKind;
  53 import org.graalvm.compiler.core.common.NumUtil;
  54 import org.graalvm.compiler.core.common.spi.LIRKindTool;
  55 import org.graalvm.compiler.core.common.type.DataPointerConstant;
  56 import org.graalvm.compiler.debug.GraalError;
  57 import org.graalvm.compiler.lir.LIRFrameState;
  58 import org.graalvm.compiler.lir.LIRInstructionClass;
  59 import org.graalvm.compiler.lir.Opcode;
  60 import org.graalvm.compiler.lir.StandardOp.LoadConstantOp;
  61 import org.graalvm.compiler.lir.StandardOp.NullCheck;
  62 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
  63 import org.graalvm.compiler.lir.VirtualStackSlot;
  64 import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
  65 import org.graalvm.compiler.options.OptionValues;
  66 
  67 import jdk.vm.ci.amd64.AMD64;
  68 import jdk.vm.ci.amd64.AMD64Kind;
  69 import jdk.vm.ci.code.Register;
  70 import jdk.vm.ci.code.RegisterValue;
  71 import jdk.vm.ci.code.StackSlot;
  72 import jdk.vm.ci.meta.AllocatableValue;
  73 import jdk.vm.ci.meta.Constant;
  74 import jdk.vm.ci.meta.JavaConstant;
  75 import jdk.vm.ci.meta.Value;
  76 
  77 public class AMD64Move {
  78 
  79     private abstract static class AbstractMoveOp extends AMD64LIRInstruction implements ValueMoveOp {
  80         public static final LIRInstructionClass<AbstractMoveOp> TYPE = LIRInstructionClass.create(AbstractMoveOp.class);
  81 
  82         private AMD64Kind moveKind;
  83 
  84         protected AbstractMoveOp(LIRInstructionClass<? extends AbstractMoveOp> c, AMD64Kind moveKind) {
  85             super(c);
  86             this.moveKind = moveKind;
  87         }
  88 
  89         @Override
  90         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
  91             move(moveKind, crb, masm, getResult(), getInput());
  92         }
  93     }
  94 
    /**
     * Move whose destination is constrained to a register; the source may be a register or a stack
     * slot. {@code HINT} on the result tells the register allocator to try to reuse the input's
     * register.
     */
    @Opcode("MOVE")
    public static final class MoveToRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveToRegOp> TYPE = LIRInstructionClass.create(MoveToRegOp.class);

        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG, STACK}) protected AllocatableValue input;

        public MoveToRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }
 118 
    /**
     * Move whose source is constrained to a register; the destination may be a register or a stack
     * slot. Mirror image of {@link MoveToRegOp}, with the allocation hint on the input side.
     */
    @Opcode("MOVE")
    public static final class MoveFromRegOp extends AbstractMoveOp {
        public static final LIRInstructionClass<MoveFromRegOp> TYPE = LIRInstructionClass.create(MoveFromRegOp.class);

        @Def({REG, STACK}) protected AllocatableValue result;
        @Use({REG, HINT}) protected AllocatableValue input;

        public MoveFromRegOp(AMD64Kind moveKind, AllocatableValue result, AllocatableValue input) {
            super(TYPE, moveKind);
            this.result = result;
            this.input = input;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }
 142 
    /**
     * Loads a {@link JavaConstant} into a register or a stack slot. Deliberately non-final
     * (unlike the other move ops) — presumably to allow platform-specific subclassing; confirm
     * with existing subclasses before sealing it.
     */
    @Opcode("MOVE")
    public static class MoveFromConstOp extends AMD64LIRInstruction implements LoadConstantOp {
        public static final LIRInstructionClass<MoveFromConstOp> TYPE = LIRInstructionClass.create(MoveFromConstOp.class);

        @Def({REG, STACK}) protected AllocatableValue result;
        private final JavaConstant input;

        public MoveFromConstOp(AllocatableValue result, JavaConstant input) {
            super(TYPE);
            this.result = result;
            this.input = input;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            if (isRegister(result)) {
                // Destination kind selects between compressed/uncompressed oop encodings etc.
                const2reg(crb, masm, asRegister(result), input, (AMD64Kind) result.getPlatformKind());
            } else {
                assert isStackSlot(result);
                const2stack(crb, masm, result, input);
            }
        }

        @Override
        public Constant getConstant() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }
    }
 176 
 177     @Opcode("STACKMOVE")
 178     public static final class AMD64StackMove extends AMD64LIRInstruction implements ValueMoveOp {
 179         public static final LIRInstructionClass<AMD64StackMove> TYPE = LIRInstructionClass.create(AMD64StackMove.class);
 180 
 181         @Def({STACK}) protected AllocatableValue result;
 182         @Use({STACK, HINT}) protected AllocatableValue input;
 183         @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private AllocatableValue backupSlot;
 184 
 185         private Register scratch;
 186 
 187         public AMD64StackMove(AllocatableValue result, AllocatableValue input, Register scratch, AllocatableValue backupSlot) {
 188             super(TYPE);
 189             this.result = result;
 190             this.input = input;
 191             this.backupSlot = backupSlot;
 192             this.scratch = scratch;
 193         }
 194 
 195         @Override
 196         public AllocatableValue getInput() {
 197             return input;
 198         }
 199 
 200         @Override
 201         public AllocatableValue getResult() {
 202             return result;
 203         }
 204 
 205         public Register getScratchRegister() {
 206             return scratch;
 207         }
 208 
 209         public AllocatableValue getBackupSlot() {
 210             return backupSlot;
 211         }
 212 
 213         @Override
 214         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
 215             AMD64Kind backupKind = (AMD64Kind) backupSlot.getPlatformKind();
 216             if (backupKind.isXMM()) {
 217                 // graal doesn't use vector values, so it's safe to backup using DOUBLE
 218                 backupKind = AMD64Kind.DOUBLE;
 219             }
 220 
 221             // backup scratch register
 222             reg2stack(backupKind, crb, masm, backupSlot, scratch);
 223             // move stack slot
 224             stack2reg((AMD64Kind) getInput().getPlatformKind(), crb, masm, scratch, getInput());
 225             reg2stack((AMD64Kind) getResult().getPlatformKind(), crb, masm, getResult(), scratch);
 226             // restore scratch register
 227             stack2reg(backupKind, crb, masm, scratch, backupSlot);
 228         }
 229     }
 230 
 231     @Opcode("MULTISTACKMOVE")
 232     public static final class AMD64MultiStackMove extends AMD64LIRInstruction {
 233         public static final LIRInstructionClass<AMD64MultiStackMove> TYPE = LIRInstructionClass.create(AMD64MultiStackMove.class);
 234 
 235         @Def({STACK}) protected AllocatableValue[] results;
 236         @Use({STACK}) protected Value[] inputs;
 237         @Alive({OperandFlag.STACK, OperandFlag.UNINITIALIZED}) private AllocatableValue backupSlot;
 238 
 239         private Register scratch;
 240 
 241         public AMD64MultiStackMove(AllocatableValue[] results, Value[] inputs, Register scratch, AllocatableValue backupSlot) {
 242             super(TYPE);
 243             this.results = results;
 244             this.inputs = inputs;
 245             this.backupSlot = backupSlot;
 246             this.scratch = scratch;
 247         }
 248 
 249         @Override
 250         public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
 251             AMD64Kind backupKind = (AMD64Kind) backupSlot.getPlatformKind();
 252             if (backupKind.isXMM()) {
 253                 // graal doesn't use vector values, so it's safe to backup using DOUBLE
 254                 backupKind = AMD64Kind.DOUBLE;
 255             }
 256 
 257             // backup scratch register
 258             move(backupKind, crb, masm, backupSlot, scratch.asValue(backupSlot.getValueKind()));
 259             for (int i = 0; i < results.length; i++) {
 260                 Value input = inputs[i];
 261                 AllocatableValue result = results[i];
 262                 // move stack slot
 263                 move((AMD64Kind) input.getPlatformKind(), crb, masm, scratch.asValue(input.getValueKind()), input);
 264                 move((AMD64Kind) result.getPlatformKind(), crb, masm, result, scratch.asValue(result.getValueKind()));
 265             }
 266             // restore scratch register
 267             move(backupKind, crb, masm, scratch.asValue(backupSlot.getValueKind()), backupSlot);
 268         }
 269     }
 270 
    /**
     * Stack-to-stack move that needs no scratch register: it pushes the source memory operand and
     * pops directly into the destination memory operand.
     */
    @Opcode("STACKMOVE")
    public static final class AMD64PushPopStackMove extends AMD64LIRInstruction implements ValueMoveOp {
        public static final LIRInstructionClass<AMD64PushPopStackMove> TYPE = LIRInstructionClass.create(AMD64PushPopStackMove.class);

        @Def({STACK}) protected AllocatableValue result;
        @Use({STACK, HINT}) protected AllocatableValue input;
        /** Operand size used for the PUSH/POP pair. */
        private final OperandSize size;

        public AMD64PushPopStackMove(OperandSize size, AllocatableValue result, AllocatableValue input) {
            super(TYPE);
            this.result = result;
            this.input = input;
            this.size = size;
        }

        @Override
        public AllocatableValue getInput() {
            return input;
        }

        @Override
        public AllocatableValue getResult() {
            return result;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64MOp.PUSH.emit(masm, size, (AMD64Address) crb.asAddress(input));
            AMD64MOp.POP.emit(masm, size, (AMD64Address) crb.asAddress(result));
        }
    }
 302 
    /**
     * Computes an effective address into a register (LEA). Only DWORD and QWORD operand sizes are
     * supported, as enforced by the assertion in {@link #emitCode}.
     */
    public static final class LeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaOp> TYPE = LIRInstructionClass.create(LeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({COMPOSITE, UNINITIALIZED}) protected AMD64AddressValue address;
        private final OperandSize size;

        public LeaOp(AllocatableValue result, AMD64AddressValue address, OperandSize size) {
            super(TYPE);
            this.result = result;
            this.address = address;
            this.size = size;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            if (size == OperandSize.QWORD) {
                masm.leaq(asRegister(result, AMD64Kind.QWORD), address.toAddress());
            } else {
                assert size == OperandSize.DWORD;
                masm.lead(asRegister(result, AMD64Kind.DWORD), address.toAddress());
            }
        }
    }
 327 
    /**
     * Loads the address of a constant placed in the code's data section into a register.
     */
    public static final class LeaDataOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<LeaDataOp> TYPE = LIRInstructionClass.create(LeaDataOp.class);

        @Def({REG}) protected AllocatableValue result;
        /** Constant whose data-section address is materialized. */
        private final DataPointerConstant data;

        public LeaDataOp(AllocatableValue result, DataPointerConstant data) {
            super(TYPE);
            this.result = result;
            this.data = data;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.leaq(asRegister(result), (AMD64Address) crb.recordDataReferenceInCode(data));
        }
    }
 345 
    /**
     * Loads the address of a stack slot into a register. The slot must be a (virtual or real)
     * stack slot, as asserted in the constructor.
     */
    public static final class StackLeaOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<StackLeaOp> TYPE = LIRInstructionClass.create(StackLeaOp.class);

        @Def({REG}) protected AllocatableValue result;
        @Use({STACK, UNINITIALIZED}) protected AllocatableValue slot;

        public StackLeaOp(AllocatableValue result, AllocatableValue slot) {
            super(TYPE);
            this.result = result;
            this.slot = slot;
            assert slot instanceof VirtualStackSlot || slot instanceof StackSlot;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // Stack addresses are always 64-bit on AMD64.
            masm.leaq(asRegister(result, AMD64Kind.QWORD), (AMD64Address) crb.asAddress(slot));
        }
    }
 364 
    /**
     * Emits a memory barrier. {@code barriers} is a bit mask interpreted by
     * {@code AMD64MacroAssembler.membar}, which decides which fence instruction (if any) is
     * required.
     */
    public static final class MembarOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<MembarOp> TYPE = LIRInstructionClass.create(MembarOp.class);

        // Bit mask of required barrier kinds (presumably jdk.vm.ci.code.MemoryBarriers bits — confirm).
        private final int barriers;

        public MembarOp(final int barriers) {
            super(TYPE);
            this.barriers = barriers;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            masm.membar(barriers);
        }
    }
 380 
    /**
     * Emits an explicit memory access whose sole purpose is to trigger a segfault (and hence an
     * implicit NullPointerException) if the base of {@code address} is null.
     */
    public static final class NullCheckOp extends AMD64LIRInstruction implements NullCheck {
        public static final LIRInstructionClass<NullCheckOp> TYPE = LIRInstructionClass.create(NullCheckOp.class);

        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @State protected LIRFrameState state;

        public NullCheckOp(AMD64AddressValue address, LIRFrameState state) {
            super(TYPE);
            this.address = address;
            this.state = state;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // Record the state BEFORE emitting the access so the recorded code position
            // matches the potentially-faulting instruction.
            crb.recordImplicitException(masm.position(), state);
            masm.nullCheck(address.toAddress());
        }

        @Override
        public Value getCheckedValue() {
            return address.base;
        }

        @Override
        public LIRFrameState getState() {
            return state;
        }
    }
 409 
    /**
     * Atomic compare-and-swap via (LOCK) CMPXCHG. CMPXCHG implicitly compares against {@code rax}
     * and leaves the previous memory value in {@code rax}, so both {@code cmpValue} and
     * {@code result} must be allocated to {@code rax} (asserted in {@link #emitCode}).
     */
    @Opcode("CAS")
    public static final class CompareAndSwapOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<CompareAndSwapOp> TYPE = LIRInstructionClass.create(CompareAndSwapOp.class);

        /** Width of the memory access (BYTE/WORD/DWORD/QWORD). */
        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Use({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue cmpValue;
        @Use protected AllocatableValue newValue;

        public CompareAndSwapOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue cmpValue, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.cmpValue = cmpValue;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // CMPXCHG's fixed-register operand: compare value in, old memory value out, both rax.
            assert asRegister(cmpValue).equals(AMD64.rax) && asRegister(result).equals(AMD64.rax);

            if (crb.target.isMP) {
                // LOCK prefix is only required on multiprocessor targets.
                masm.lock();
            }
            switch (accessKind) {
                case BYTE:
                    masm.cmpxchgb(asRegister(newValue), address.toAddress());
                    break;
                case WORD:
                    masm.cmpxchgw(asRegister(newValue), address.toAddress());
                    break;
                case DWORD:
                    masm.cmpxchgl(asRegister(newValue), address.toAddress());
                    break;
                case QWORD:
                    masm.cmpxchgq(asRegister(newValue), address.toAddress());
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }
 455 
    /**
     * Atomic fetch-and-add via (LOCK) XADD: after execution {@code result} holds the previous
     * memory value and memory holds the sum. {@code address} is {@code @Alive} rather than
     * {@code @Use} because it is still needed after {@code result} is defined (the delta is moved
     * into {@code result} first), so {@code result} must not share a register with the address.
     */
    @Opcode("ATOMIC_READ_AND_ADD")
    public static final class AtomicReadAndAddOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndAddOp> TYPE = LIRInstructionClass.create(AtomicReadAndAddOp.class);

        /** Width of the memory access (BYTE/WORD/DWORD/QWORD). */
        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue delta;

        public AtomicReadAndAddOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue delta) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.delta = delta;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // XADD exchanges register and memory, so the delta is staged in the result register.
            move(accessKind, crb, masm, result, delta);
            if (crb.target.isMP) {
                // LOCK prefix is only required on multiprocessor targets.
                masm.lock();
            }
            switch (accessKind) {
                case BYTE:
                    masm.xaddb(address.toAddress(), asRegister(result));
                    break;
                case WORD:
                    masm.xaddw(address.toAddress(), asRegister(result));
                    break;
                case DWORD:
                    masm.xaddl(address.toAddress(), asRegister(result));
                    break;
                case QWORD:
                    masm.xaddq(address.toAddress(), asRegister(result));
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }
 498 
    /**
     * Atomic swap via XCHG: after execution {@code result} holds the previous memory value and
     * memory holds {@code newValue}. No explicit LOCK prefix is emitted — XCHG with a memory
     * operand is implicitly locked on x86. As in {@link AtomicReadAndAddOp}, {@code address} is
     * {@code @Alive} because it is read after {@code result} is defined.
     */
    @Opcode("ATOMIC_READ_AND_WRITE")
    public static final class AtomicReadAndWriteOp extends AMD64LIRInstruction {
        public static final LIRInstructionClass<AtomicReadAndWriteOp> TYPE = LIRInstructionClass.create(AtomicReadAndWriteOp.class);

        /** Width of the memory access (BYTE/WORD/DWORD/QWORD). */
        private final AMD64Kind accessKind;

        @Def protected AllocatableValue result;
        @Alive({COMPOSITE}) protected AMD64AddressValue address;
        @Use protected AllocatableValue newValue;

        public AtomicReadAndWriteOp(AMD64Kind accessKind, AllocatableValue result, AMD64AddressValue address, AllocatableValue newValue) {
            super(TYPE);
            this.accessKind = accessKind;
            this.result = result;
            this.address = address;
            this.newValue = newValue;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // XCHG exchanges register and memory, so the new value is staged in the result register.
            move(accessKind, crb, masm, result, newValue);
            switch (accessKind) {
                case BYTE:
                    masm.xchgb(asRegister(result), address.toAddress());
                    break;
                case WORD:
                    masm.xchgw(asRegister(result), address.toAddress());
                    break;
                case DWORD:
                    masm.xchgl(asRegister(result), address.toAddress());
                    break;
                case QWORD:
                    masm.xchgq(asRegister(result), address.toAddress());
                    break;
                default:
                    throw GraalError.shouldNotReachHere();
            }
        }
    }
 538 
    /**
     * Moves {@code input} to {@code result}, deriving the move kind from the result's platform
     * kind.
     */
    public static void move(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
        move((AMD64Kind) result.getPlatformKind(), crb, masm, result, input);
    }
 542 
 543     public static void move(AMD64Kind moveKind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Value input) {
 544         if (isRegister(input)) {
 545             if (isRegister(result)) {
 546                 reg2reg(moveKind, masm, result, input);
 547             } else if (isStackSlot(result)) {
 548                 reg2stack(moveKind, crb, masm, result, asRegister(input));
 549             } else {
 550                 throw GraalError.shouldNotReachHere();
 551             }
 552         } else if (isStackSlot(input)) {
 553             if (isRegister(result)) {
 554                 stack2reg(moveKind, crb, masm, asRegister(result), input);
 555             } else {
 556                 throw GraalError.shouldNotReachHere();
 557             }
 558         } else if (isJavaConstant(input)) {
 559             if (isRegister(result)) {
 560                 const2reg(crb, masm, asRegister(result), asJavaConstant(input), moveKind);
 561             } else if (isStackSlot(result)) {
 562                 const2stack(crb, masm, result, asJavaConstant(input));
 563             } else {
 564                 throw GraalError.shouldNotReachHere();
 565             }
 566         } else {
 567             throw GraalError.shouldNotReachHere();
 568         }
 569     }
 570 
 571     private static void reg2reg(AMD64Kind kind, AMD64MacroAssembler masm, Value result, Value input) {
 572         if (asRegister(input).equals(asRegister(result))) {
 573             return;
 574         }
 575         assert asRegister(result).getRegisterCategory().equals(asRegister(input).getRegisterCategory());
 576         switch (kind) {
 577             case BYTE:
 578             case WORD:
 579             case DWORD:
 580                 masm.movl(asRegister(result), asRegister(input));
 581                 break;
 582             case QWORD:
 583                 masm.movq(asRegister(result), asRegister(input));
 584                 break;
 585             case SINGLE:
 586                 masm.movflt(asRegister(result, AMD64Kind.SINGLE), asRegister(input, AMD64Kind.SINGLE));
 587                 break;
 588             case DOUBLE:
 589                 masm.movdbl(asRegister(result, AMD64Kind.DOUBLE), asRegister(input, AMD64Kind.DOUBLE));
 590                 break;
 591             default:
 592                 throw GraalError.shouldNotReachHere("kind=" + kind);
 593         }
 594     }
 595 
    /**
     * Stores the register {@code input} into the stack slot {@code result} with a store of the
     * width given by {@code kind}.
     */
    public static void reg2stack(AMD64Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, Register input) {
        AMD64Address dest = (AMD64Address) crb.asAddress(result);
        switch (kind) {
            case BYTE:
                masm.movb(dest, input);
                break;
            case WORD:
                masm.movw(dest, input);
                break;
            case DWORD:
                masm.movl(dest, input);
                break;
            case QWORD:
                masm.movq(dest, input);
                break;
            case SINGLE:
                masm.movflt(dest, input);
                break;
            case DOUBLE:
                masm.movsd(dest, input);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
 621 
    /**
     * Loads the stack slot {@code input} into the register {@code result}. Note that sub-word
     * kinds are sign-extended to 32 bits (movsbl/movswl), unlike the plain stores in
     * {@link #reg2stack}.
     */
    public static void stack2reg(AMD64Kind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, Value input) {
        AMD64Address src = (AMD64Address) crb.asAddress(input);
        switch (kind) {
            case BYTE:
                masm.movsbl(result, src);
                break;
            case WORD:
                masm.movswl(result, src);
                break;
            case DWORD:
                masm.movl(result, src);
                break;
            case QWORD:
                masm.movq(result, src);
                break;
            case SINGLE:
                masm.movflt(result, src);
                break;
            case DOUBLE:
                masm.movdbl(result, src);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }
 647 
 648     public static void const2reg(CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, JavaConstant input, AMD64Kind moveKind) {
 649         /*
 650          * Note: we use the kind of the input operand (and not the kind of the result operand)
 651          * because they don't match in all cases. For example, an object constant can be loaded to a
 652          * long register when unsafe casts occurred (e.g., for a write barrier where arithmetic
 653          * operations are then performed on the pointer).
 654          */
 655         switch (input.getJavaKind().getStackKind()) {
 656             case Int:
 657                 // Do not optimize with an XOR as this instruction may be between
 658                 // a CMP and a Jcc in which case the XOR will modify the condition
 659                 // flags and interfere with the Jcc.
 660                 masm.movl(result, input.asInt());
 661 
 662                 break;
 663             case Long:
 664                 // Do not optimize with an XOR as this instruction may be between
 665                 // a CMP and a Jcc in which case the XOR will modify the condition
 666                 // flags and interfere with the Jcc.
 667                 if (input.asLong() == (int) input.asLong()) {
 668                     // Sign extended to long
 669                     masm.movslq(result, (int) input.asLong());
 670                 } else if ((input.asLong() & 0xFFFFFFFFL) == input.asLong()) {
 671                     // Zero extended to long
 672                     masm.movl(result, (int) input.asLong());
 673                 } else {
 674                     masm.movq(result, input.asLong());
 675                 }
 676                 break;
 677             case Float:
 678                 // This is *not* the same as 'constant == 0.0f' in the case where constant is -0.0f
 679                 if (Float.floatToRawIntBits(input.asFloat()) == Float.floatToRawIntBits(0.0f)) {
 680                     masm.xorps(result, result);
 681                 } else {
 682                     masm.movflt(result, (AMD64Address) crb.asFloatConstRef(input));
 683                 }
 684                 break;
 685             case Double:
 686                 // This is *not* the same as 'constant == 0.0d' in the case where constant is -0.0d
 687                 if (Double.doubleToRawLongBits(input.asDouble()) == Double.doubleToRawLongBits(0.0d)) {
 688                     masm.xorpd(result, result);
 689                 } else {
 690                     masm.movdbl(result, (AMD64Address) crb.asDoubleConstRef(input));
 691                 }
 692                 break;
 693             case Object:
 694                 assert moveKind != null : "a nun-null moveKind is required for loading an object constant";
 695                 // Do not optimize with an XOR as this instruction may be between
 696                 // a CMP and a Jcc in which case the XOR will modify the condition
 697                 // flags and interfere with the Jcc.
 698                 if (input.isNull()) {
 699                     if (moveKind == AMD64Kind.QWORD && crb.mustReplaceWithUncompressedNullRegister(input)) {
 700                         masm.movq(result, crb.uncompressedNullRegister);
 701                     } else {
 702                         // Upper bits will be zeroed so this also works for narrow oops
 703                         masm.movslq(result, 0);
 704                     }
 705                 } else {
 706                     if (crb.target.inlineObjects) {
 707                         crb.recordInlineDataInCode(input);
 708                         if (moveKind == AMD64Kind.DWORD) {
 709                             // Support for narrow oops
 710                             masm.movl(result, 0xDEADDEAD, true);
 711                         } else {
 712                             masm.movq(result, 0xDEADDEADDEADDEADL, true);
 713                         }
 714                     } else {
 715                         if (moveKind == AMD64Kind.DWORD) {
 716                             // Support for narrow oops
 717                             masm.movl(result, (AMD64Address) crb.recordDataReferenceInCode(input, 0));
 718                         } else {
 719                             masm.movq(result, (AMD64Address) crb.recordDataReferenceInCode(input, 0));
 720                         }
 721                     }
 722                 }
 723                 break;
 724             default:
 725                 throw GraalError.shouldNotReachHere();
 726         }
 727     }
 728 
 729     public static boolean canMoveConst2Stack(JavaConstant input) {
 730         switch (input.getJavaKind().getStackKind()) {
 731             case Int:
 732                 break;
 733             case Long:
 734                 break;
 735             case Float:
 736                 break;
 737             case Double:
 738                 break;
 739             case Object:
 740                 if (input.isNull()) {
 741                     return true;
 742                 } else {
 743                     return false;
 744                 }
 745             default:
 746                 return false;
 747         }
 748         return true;
 749     }
 750 
 751     public static void const2stack(CompilationResultBuilder crb, AMD64MacroAssembler masm, Value result, JavaConstant input) {
 752         AMD64Address dest = (AMD64Address) crb.asAddress(result);
 753         final long imm;
 754         switch (input.getJavaKind().getStackKind()) {
 755             case Int:
 756                 imm = input.asInt();
 757                 break;
 758             case Long:
 759                 imm = input.asLong();
 760                 break;
 761             case Float:
 762                 imm = floatToRawIntBits(input.asFloat());
 763                 break;
 764             case Double:
 765                 imm = doubleToRawLongBits(input.asDouble());
 766                 break;
 767             case Object:
 768                 if (input.isNull()) {
 769                     if (crb.mustReplaceWithUncompressedNullRegister(input)) {
 770                         masm.movq(dest, crb.uncompressedNullRegister);
 771                         return;
 772                     }
 773                     imm = 0;
 774                 } else {
 775                     throw GraalError.shouldNotReachHere("Non-null object constants must be in a register");
 776                 }
 777                 break;
 778             default:
 779                 throw GraalError.shouldNotReachHere();
 780         }
 781 
 782         switch ((AMD64Kind) result.getPlatformKind()) {
 783             case BYTE:
 784                 assert NumUtil.isByte(imm) : "Is not in byte range: " + imm;
 785                 AMD64MIOp.MOVB.emit(masm, OperandSize.BYTE, dest, (int) imm);
 786                 break;
 787             case WORD:
 788                 assert NumUtil.isShort(imm) : "Is not in short range: " + imm;
 789                 AMD64MIOp.MOV.emit(masm, OperandSize.WORD, dest, (int) imm);
 790                 break;
 791             case DWORD:
 792             case SINGLE:
 793                 assert NumUtil.isInt(imm) : "Is not in int range: " + imm;
 794                 masm.movl(dest, (int) imm);
 795                 break;
 796             case QWORD:
 797             case DOUBLE:
 798                 masm.movlong(dest, imm);
 799                 break;
 800             default:
 801                 throw GraalError.shouldNotReachHere("Unknown result Kind: " + result.getPlatformKind());
 802         }
 803     }
 804 
    /**
     * Common base class for the LIR operations that convert a pointer between its compressed
     * and uncompressed representations according to a {@link CompressEncoding} (an optional
     * base register plus an optional shift).
     */
    public abstract static class PointerCompressionOp extends AMD64LIRInstruction {
        protected final LIRKindTool lirKindTool;
        protected final CompressEncoding encoding;
        // When true, the input is known to be non-null, so subclasses may omit
        // the explicit null handling.
        protected final boolean nonNull;

        @Def({REG, HINT}) private AllocatableValue result;
        @Use({REG, CONST}) private Value input;
        @Alive({REG, ILLEGAL, UNINITIALIZED}) private AllocatableValue baseRegister;

        protected PointerCompressionOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
                        AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {

            super(type);
            this.result = result;
            this.input = input;
            this.baseRegister = baseRegister;
            this.encoding = encoding;
            this.nonNull = nonNull;
            this.lirKindTool = lirKindTool;
        }

        /**
         * Returns true if a base register is required, i.e. when generating
         * position-independent code or when the encoding itself specifies a base.
         */
        public static boolean hasBase(OptionValues options, CompressEncoding encoding) {
            return GeneratePIC.getValue(options) || encoding.hasBase();
        }

        public final Value getInput() {
            return input;
        }

        public final AllocatableValue getResult() {
            return result;
        }

        protected final Register getResultRegister() {
            return asRegister(result);
        }

        /**
         * Returns the base register of the encoding, or {@link Register#None} if no base is
         * needed (see {@link #hasBase}).
         */
        protected final Register getBaseRegister(CompilationResultBuilder crb) {
            return hasBase(crb.getOptions(), encoding) ? asRegister(baseRegister) : Register.None;
        }

        protected final int getShift() {
            return encoding.getShift();
        }

        // Copies input into result with the given kind; subclasses then transform
        // result in place.
        protected final void move(LIRKind kind, CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            AMD64Move.move((AMD64Kind) kind.getPlatformKind(), crb, masm, result, input);
        }
    }
 854 
    /**
     * Compresses an uncompressed pointer: {@code compressed = (ptr - base) >>> shift},
     * where the base subtraction and the shift are each applied only if present in the
     * encoding. Null is preserved: it compresses to 0.
     */
    public static class CompressPointerOp extends PointerCompressionOp {
        public static final LIRInstructionClass<CompressPointerOp> TYPE = LIRInstructionClass.create(CompressPointerOp.class);

        public CompressPointerOp(AllocatableValue result, Value input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
            this(TYPE, result, input, baseRegister, encoding, nonNull, lirKindTool);
        }

        private CompressPointerOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
                        AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {

            super(type, result, input, baseRegister, encoding, nonNull, lirKindTool);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            // Copy the uncompressed pointer into the result register; all further
            // transformations happen in place.
            move(lirKindTool.getObjectKind(), crb, masm);

            final Register resReg = getResultRegister();
            final Register baseReg = getBaseRegister(crb);
            if (!baseReg.equals(Register.None)) {
                if (!nonNull) {
                    // A null pointer must compress to 0. Replace null with the base value
                    // so that the subtraction below yields exactly 0.
                    masm.testq(resReg, resReg);
                    masm.cmovq(Equal, resReg, baseReg);
                }
                masm.subq(resReg, baseReg);
            }

            int shift = getShift();
            if (shift != 0) {
                // Logical right shift; valid because base-relative offsets are non-negative.
                masm.shrq(resReg, shift);
            }
        }
    }
 888 
    /**
     * Uncompresses a compressed pointer: {@code ptr = base + (compressed << shift)}, where
     * the shift and the base addition are each applied only if present in the encoding.
     * Null is preserved: a compressed value of 0 uncompresses to null (0).
     */
    public static class UncompressPointerOp extends PointerCompressionOp {
        public static final LIRInstructionClass<UncompressPointerOp> TYPE = LIRInstructionClass.create(UncompressPointerOp.class);

        public UncompressPointerOp(AllocatableValue result, Value input, AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
            this(TYPE, result, input, baseRegister, encoding, nonNull, lirKindTool);
        }

        private UncompressPointerOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
                        AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
            super(type, result, input, baseRegister, encoding, nonNull, lirKindTool);
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register baseReg = getBaseRegister(crb);
            // Fast path: for a known non-null input already in a (different) register, a
            // single LEA computes base + (input << shift) without an intermediate move.
            if (nonNull && !baseReg.equals(Register.None) && getInput() instanceof RegisterValue) {
                Register inputReg = ((RegisterValue) getInput()).getRegister();
                if (!inputReg.equals(getResultRegister())) {
                    masm.leaq(getResultRegister(), new AMD64Address(baseReg, inputReg, AMD64Address.Scale.fromShift(getShift())));
                    return;
                }
            }
            // General path: copy the narrow value into the result register, then widen it
            // in place.
            move(lirKindTool.getNarrowOopKind(), crb, masm);
            emitUncompressCode(masm, getResultRegister(), getShift(), baseReg, nonNull);
        }

        /**
         * Emits the in-place uncompression of {@code resReg}.
         *
         * @param resReg register holding the compressed value on entry and the uncompressed
         *            value on exit
         * @param shift number of bits the compressed value is shifted left (may be 0)
         * @param baseReg base to add, or {@link Register#None} if the encoding has no base
         * @param nonNull whether the value is known to be non-null, allowing the null check
         *            to be omitted
         */
        public static void emitUncompressCode(AMD64MacroAssembler masm, Register resReg, int shift, Register baseReg, boolean nonNull) {
            if (nonNull) {
                if (!baseReg.equals(Register.None)) {
                    if (shift != 0) {
                        // base + (res << shift) in one LEA.
                        masm.leaq(resReg, new AMD64Address(baseReg, resReg, AMD64Address.Scale.fromShift(shift)));
                    } else {
                        masm.addq(resReg, baseReg);
                    }
                } else if (shift != 0) {
                    masm.shlq(resReg, shift);
                }
            } else {
                if (shift != 0) {
                    masm.shlq(resReg, shift);
                }

                if (!baseReg.equals(Register.None)) {
                    if (shift == 0) {
                        // if encoding.shift != 0, the flags are already set by the shlq
                        masm.testq(resReg, resReg);
                    }

                    // Skip the base addition for a compressed null (0) so that null
                    // uncompresses to null rather than to the heap base.
                    Label done = new Label();
                    masm.jccb(Equal, done);
                    masm.addq(resReg, baseReg);
                    masm.bind(done);
                }
            }
        }
    }
 945 
    /**
     * Common base class for operations converting between the two representations of null:
     * the literal value 0 and the value held in {@code crb.uncompressedNullRegister}. If no
     * such register is configured ({@link Register#None}), both representations coincide and
     * no code is emitted.
     */
    private abstract static class ZeroNullConversionOp extends AMD64LIRInstruction {
        @Def({REG, HINT}) protected AllocatableValue result;
        @Use({REG}) protected AllocatableValue input;

        protected ZeroNullConversionOp(LIRInstructionClass<? extends ZeroNullConversionOp> type, AllocatableValue result, AllocatableValue input) {
            super(type);
            this.result = result;
            this.input = input;
        }

        @Override
        public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
            Register nullRegister = crb.uncompressedNullRegister;
            if (!nullRegister.equals(Register.None)) {
                emitConversion(asRegister(result), asRegister(input), nullRegister, masm);
            }
        }

        /**
         * Emits the actual conversion; only called when a non-{@code None} null register is
         * configured.
         */
        protected abstract void emitConversion(Register resultRegister, Register inputRegister, Register nullRegister, AMD64MacroAssembler masm);
    }
 966 
    /**
     * Converts a pointer whose null representation is the value in the null register into one
     * whose null representation is 0; all other values pass through unchanged.
     */
    public static class ConvertNullToZeroOp extends ZeroNullConversionOp {
        public static final LIRInstructionClass<ConvertNullToZeroOp> TYPE = LIRInstructionClass.create(ConvertNullToZeroOp.class);

        public ConvertNullToZeroOp(AllocatableValue result, AllocatableValue input) {
            super(TYPE, result, input);
        }

        @Override
        protected final void emitConversion(Register resultRegister, Register inputRegister, Register nullRegister, AMD64MacroAssembler masm) {
            if (inputRegister.equals(resultRegister)) {
                // In-place variant: subtract the null value. If the input was null the
                // result is now exactly 0 and the SUB set the Zero flag, so the add-back
                // is skipped; otherwise the addition restores the original value.
                masm.subq(inputRegister, nullRegister);
                Label done = new Label();
                masm.jccb(Equal, done);
                masm.addq(inputRegister, nullRegister);
                masm.bind(done);
            } else {
                // Two-register variant: zero the result (subq reg,reg), then conditionally
                // copy the input over it unless the input equals the null value.
                masm.subq(resultRegister, resultRegister);
                masm.cmpq(inputRegister, nullRegister);
                masm.cmovq(NotEqual, resultRegister, inputRegister);
            }
        }
    }
 989 
    /**
     * Converts a pointer whose null representation is 0 into one whose null representation is
     * the value in the null register; all other values pass through unchanged.
     */
    public static class ConvertZeroToNullOp extends ZeroNullConversionOp {
        public static final LIRInstructionClass<ConvertZeroToNullOp> TYPE = LIRInstructionClass.create(ConvertZeroToNullOp.class);

        public ConvertZeroToNullOp(AllocatableValue result, AllocatableValue input) {
            super(TYPE, result, input);
        }

        @Override
        protected final void emitConversion(Register resultRegister, Register inputRegister, Register nullRegister, AMD64MacroAssembler masm) {
            // Copy the input, then overwrite the result with the null register's value
            // if (and only if) the input was 0.
            if (!inputRegister.equals(resultRegister)) {
                masm.movq(resultRegister, inputRegister);
            }
            masm.testq(inputRegister, inputRegister);
            masm.cmovq(Equal, resultRegister, nullRegister);
        }
    }
1006 }