/*
 * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 */ 23 package org.graalvm.compiler.lir.aarch64; 24 25 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE; 26 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT; 27 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG; 28 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK; 29 import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED; 30 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant; 31 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant; 32 import static jdk.vm.ci.aarch64.AArch64.sp; 33 import static jdk.vm.ci.aarch64.AArch64.zr; 34 import static jdk.vm.ci.code.ValueUtil.asAllocatableValue; 35 import static jdk.vm.ci.code.ValueUtil.asRegister; 36 import static jdk.vm.ci.code.ValueUtil.asStackSlot; 37 import static jdk.vm.ci.code.ValueUtil.isRegister; 38 import static jdk.vm.ci.code.ValueUtil.isStackSlot; 39 40 import org.graalvm.compiler.asm.Label; 41 import org.graalvm.compiler.asm.aarch64.AArch64Address; 42 import org.graalvm.compiler.asm.aarch64.AArch64Assembler; 43 import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler; 44 import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.ScratchRegister; 45 import org.graalvm.compiler.core.common.LIRKind; 46 import org.graalvm.compiler.core.common.type.DataPointerConstant; 47 import org.graalvm.compiler.debug.GraalError; 48 import org.graalvm.compiler.lir.LIRFrameState; 49 import org.graalvm.compiler.lir.LIRInstructionClass; 50 import org.graalvm.compiler.lir.Opcode; 51 import org.graalvm.compiler.lir.StandardOp; 52 import org.graalvm.compiler.lir.StandardOp.LoadConstantOp; 53 import org.graalvm.compiler.lir.StandardOp.NullCheck; 54 import org.graalvm.compiler.lir.StandardOp.ValueMoveOp; 55 import org.graalvm.compiler.lir.VirtualStackSlot; 56 import org.graalvm.compiler.lir.asm.CompilationResultBuilder; 57 58 import jdk.vm.ci.aarch64.AArch64Kind; 59 import 
jdk.vm.ci.code.Register; 60 import jdk.vm.ci.code.StackSlot; 61 import jdk.vm.ci.meta.AllocatableValue; 62 import jdk.vm.ci.meta.Constant; 63 import jdk.vm.ci.meta.JavaConstant; 64 import jdk.vm.ci.meta.PlatformKind; 65 import jdk.vm.ci.meta.Value; 66 67 public class AArch64Move { 68 69 public static class LoadInlineConstant extends AArch64LIRInstruction implements LoadConstantOp { 70 public static final LIRInstructionClass<LoadInlineConstant> TYPE = LIRInstructionClass.create(LoadInlineConstant.class); 71 72 private JavaConstant constant; 73 @Def({REG, STACK}) AllocatableValue result; 74 75 public LoadInlineConstant(JavaConstant constant, AllocatableValue result) { 76 super(TYPE); 77 this.constant = constant; 78 this.result = result; 79 } 80 81 @Override 82 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 83 if (isRegister(result)) { 84 const2reg(crb, masm, result, constant); 85 } else if (isStackSlot(result)) { 86 StackSlot slot = asStackSlot(result); 87 const2stack(crb, masm, slot, constant); 88 } 89 } 90 91 @Override 92 public Constant getConstant() { 93 return constant; 94 } 95 96 @Override 97 public AllocatableValue getResult() { 98 return result; 99 } 100 } 101 102 @Opcode("MOVE") 103 public static class Move extends AArch64LIRInstruction implements ValueMoveOp { 104 public static final LIRInstructionClass<Move> TYPE = LIRInstructionClass.create(Move.class); 105 106 @Def({REG, STACK, HINT}) protected AllocatableValue result; 107 @Use({REG, STACK}) protected AllocatableValue input; 108 109 public Move(AllocatableValue result, AllocatableValue input) { 110 super(TYPE); 111 this.result = result; 112 this.input = input; 113 } 114 115 @Override 116 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 117 move(crb, masm, getResult(), getInput()); 118 } 119 120 @Override 121 public AllocatableValue getInput() { 122 return input; 123 } 124 125 @Override 126 public AllocatableValue getResult() { 127 return 
result; 128 } 129 } 130 131 public static class LoadAddressOp extends AArch64LIRInstruction { 132 public static final LIRInstructionClass<LoadAddressOp> TYPE = LIRInstructionClass.create(LoadAddressOp.class); 133 134 @Def protected AllocatableValue result; 135 @Use(COMPOSITE) protected AArch64AddressValue address; 136 137 public LoadAddressOp(AllocatableValue result, AArch64AddressValue address) { 138 super(TYPE); 139 this.result = result; 140 this.address = address; 141 } 142 143 @Override 144 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 145 Register dst = asRegister(result); 146 AArch64Address adr = address.toAddress(); 147 masm.loadAddress(dst, adr, address.getPlatformKind().getSizeInBytes()); 148 } 149 } 150 151 public static class LoadDataOp extends AArch64LIRInstruction { 152 public static final LIRInstructionClass<LoadDataOp> TYPE = LIRInstructionClass.create(LoadDataOp.class); 153 154 @Def protected AllocatableValue result; 155 private final DataPointerConstant data; 156 157 public LoadDataOp(AllocatableValue result, DataPointerConstant data) { 158 super(TYPE); 159 this.result = result; 160 this.data = data; 161 } 162 163 @Override 164 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 165 Register dst = asRegister(result); 166 masm.loadAddress(dst, (AArch64Address) crb.recordDataReferenceInCode(data), data.getAlignment()); 167 } 168 } 169 170 public static class StackLoadAddressOp extends AArch64LIRInstruction { 171 public static final LIRInstructionClass<StackLoadAddressOp> TYPE = LIRInstructionClass.create(StackLoadAddressOp.class); 172 173 @Def protected AllocatableValue result; 174 @Use({STACK, UNINITIALIZED}) protected AllocatableValue slot; 175 176 public StackLoadAddressOp(AllocatableValue result, AllocatableValue slot) { 177 super(TYPE); 178 assert slot instanceof VirtualStackSlot || slot instanceof StackSlot; 179 this.result = result; 180 this.slot = slot; 181 } 182 183 @Override 
184 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 185 AArch64Address address = (AArch64Address) crb.asAddress(slot); 186 PlatformKind kind = AArch64Kind.QWORD; 187 masm.loadAddress(asRegister(result, kind), address, kind.getSizeInBytes()); 188 } 189 } 190 191 public static class MembarOp extends AArch64LIRInstruction { 192 public static final LIRInstructionClass<MembarOp> TYPE = LIRInstructionClass.create(MembarOp.class); 193 194 @SuppressWarnings("unused") private final int barriers; 195 196 public MembarOp(int barriers) { 197 super(TYPE); 198 this.barriers = barriers; 199 } 200 201 @Override 202 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 203 // As I understand it load acquire/store release have the same semantics as on IA64 204 // and allow us to handle LoadStore, LoadLoad and StoreStore without an explicit 205 // barrier. 206 // But Graal support to figure out if a load/store is volatile is non-existant so for 207 // now 208 // just use 209 // memory barriers everywhere. 210 // if ((barrier & MemoryBarriers.STORE_LOAD) != 0) { 211 masm.dmb(AArch64MacroAssembler.BarrierKind.ANY_ANY); 212 // } 213 } 214 } 215 216 abstract static class MemOp extends AArch64LIRInstruction implements StandardOp.ImplicitNullCheck { 217 218 protected final AArch64Kind kind; 219 @Use({COMPOSITE}) protected AArch64AddressValue addressValue; 220 @State protected LIRFrameState state; 221 222 MemOp(LIRInstructionClass<? 
extends MemOp> c, AArch64Kind kind, AArch64AddressValue address, LIRFrameState state) { 223 super(c); 224 this.kind = kind; 225 this.addressValue = address; 226 this.state = state; 227 } 228 229 protected abstract void emitMemAccess(CompilationResultBuilder crb, AArch64MacroAssembler masm); 230 231 @Override 232 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 233 if (state != null) { 234 crb.recordImplicitException(masm.position(), state); 235 } 236 emitMemAccess(crb, masm); 237 } 238 239 @Override 240 public boolean makeNullCheckFor(Value value, LIRFrameState nullCheckState, int implicitNullCheckLimit) { 241 int immediate = addressValue.getImmediate(); 242 if (state == null && value.equals(addressValue.getBase()) && addressValue.getOffset().equals(Value.ILLEGAL) && immediate >= 0 && immediate < implicitNullCheckLimit) { 243 state = nullCheckState; 244 return true; 245 } 246 return false; 247 } 248 } 249 250 public static final class LoadOp extends MemOp { 251 public static final LIRInstructionClass<LoadOp> TYPE = LIRInstructionClass.create(LoadOp.class); 252 253 @Def protected AllocatableValue result; 254 255 public LoadOp(AArch64Kind kind, AllocatableValue result, AArch64AddressValue address, LIRFrameState state) { 256 super(TYPE, kind, address, state); 257 this.result = result; 258 } 259 260 @Override 261 protected void emitMemAccess(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 262 AArch64Address address = addressValue.toAddress(); 263 Register dst = asRegister(result); 264 265 int destSize = result.getPlatformKind().getSizeInBytes() * Byte.SIZE; 266 int srcSize = kind.getSizeInBytes() * Byte.SIZE; 267 if (kind.isInteger()) { 268 // TODO How to load unsigned chars without the necessary information? 
269 masm.ldrs(destSize, srcSize, dst, address); 270 } else { 271 assert srcSize == destSize; 272 masm.fldr(srcSize, dst, address); 273 } 274 } 275 } 276 277 public static class StoreOp extends MemOp { 278 public static final LIRInstructionClass<StoreOp> TYPE = LIRInstructionClass.create(StoreOp.class); 279 @Use protected AllocatableValue input; 280 281 public StoreOp(AArch64Kind kind, AArch64AddressValue address, AllocatableValue input, LIRFrameState state) { 282 super(TYPE, kind, address, state); 283 this.input = input; 284 } 285 286 @Override 287 protected void emitMemAccess(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 288 emitStore(crb, masm, kind, addressValue.toAddress(), input); 289 } 290 } 291 292 public static final class StoreConstantOp extends MemOp { 293 public static final LIRInstructionClass<StoreConstantOp> TYPE = LIRInstructionClass.create(StoreConstantOp.class); 294 295 protected final JavaConstant input; 296 297 public StoreConstantOp(AArch64Kind kind, AArch64AddressValue address, JavaConstant input, LIRFrameState state) { 298 super(TYPE, kind, address, state); 299 this.input = input; 300 if (!input.isDefaultForKind()) { 301 throw GraalError.shouldNotReachHere("Can only store null constants to memory"); 302 } 303 } 304 305 @Override 306 public void emitMemAccess(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 307 emitStore(crb, masm, kind, addressValue.toAddress(), zr.asValue(LIRKind.combine(addressValue))); 308 } 309 } 310 311 public static final class NullCheckOp extends AArch64LIRInstruction implements NullCheck { 312 public static final LIRInstructionClass<NullCheckOp> TYPE = LIRInstructionClass.create(NullCheckOp.class); 313 314 @Use(COMPOSITE) protected AArch64AddressValue address; 315 @State protected LIRFrameState state; 316 317 public NullCheckOp(AArch64AddressValue address, LIRFrameState state) { 318 super(TYPE); 319 this.address = address; 320 this.state = state; 321 } 322 323 @Override 324 public void 
emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 325 crb.recordImplicitException(masm.position(), state); 326 masm.ldr(64, zr, address.toAddress()); 327 } 328 329 @Override 330 public Value getCheckedValue() { 331 return address.base; 332 } 333 334 @Override 335 public LIRFrameState getState() { 336 return state; 337 } 338 } 339 340 /** 341 * Compare and swap instruction. Does the following atomically: <code> 342 * CAS(newVal, expected, address): 343 * oldVal = *address 344 * if oldVal == expected: 345 * *address = newVal 346 * return oldVal 347 * </code> 348 */ 349 @Opcode("CAS") 350 public static class CompareAndSwapOp extends AArch64LIRInstruction { 351 public static final LIRInstructionClass<CompareAndSwapOp> TYPE = LIRInstructionClass.create(CompareAndSwapOp.class); 352 353 @Def protected AllocatableValue resultValue; 354 @Alive protected Value expectedValue; 355 @Alive protected AllocatableValue newValue; 356 @Alive protected AllocatableValue addressValue; 357 @Temp protected AllocatableValue scratchValue; 358 359 public CompareAndSwapOp(AllocatableValue result, Value expectedValue, AllocatableValue newValue, AllocatableValue addressValue, AllocatableValue scratch) { 360 super(TYPE); 361 this.resultValue = result; 362 this.expectedValue = expectedValue; 363 this.newValue = newValue; 364 this.addressValue = addressValue; 365 this.scratchValue = scratch; 366 } 367 368 @Override 369 public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { 370 AArch64Kind kind = (AArch64Kind) expectedValue.getPlatformKind(); 371 assert kind.isInteger(); 372 final int size = kind.getSizeInBytes() * Byte.SIZE; 373 374 Register address = asRegister(addressValue); 375 Register result = asRegister(resultValue); 376 Register newVal = asRegister(newValue); 377 Register scratch = asRegister(scratchValue); 378 // We could avoid using a scratch register here, by reusing resultValue for the stlxr 379 // success flag and issue a mov resultValue, 
expectedValue in case of success before 380 // returning. 381 Label retry = new Label(); 382 Label fail = new Label(); 383 masm.bind(retry); 384 masm.ldaxr(size, result, address); 385 AArch64Compare.gpCompare(masm, resultValue, expectedValue); 386 masm.branchConditionally(AArch64Assembler.ConditionFlag.NE, fail); 387 masm.stlxr(size, scratch, newVal, address); 388 // if scratch == 0 then write successful, else retry. 389 masm.cbnz(32, scratch, retry); 390 masm.bind(fail); 391 } 392 } 393 394 private static void emitStore(@SuppressWarnings("unused") CompilationResultBuilder crb, AArch64MacroAssembler masm, AArch64Kind kind, AArch64Address dst, Value src) { 395 int destSize = kind.getSizeInBytes() * Byte.SIZE; 396 if (kind.isInteger()) { 397 masm.str(destSize, asRegister(src), dst); 398 } else { 399 masm.fstr(destSize, asRegister(src), dst); 400 } 401 } 402 403 public static void move(CompilationResultBuilder crb, AArch64MacroAssembler masm, AllocatableValue result, Value input) { 404 if (isRegister(input)) { 405 if (isRegister(result)) { 406 reg2reg(crb, masm, result, asAllocatableValue(input)); 407 } else if (isStackSlot(result)) { 408 reg2stack(crb, masm, result, asAllocatableValue(input)); 409 } else { 410 throw GraalError.shouldNotReachHere(); 411 } 412 } else if (isStackSlot(input)) { 413 if (isRegister(result)) { 414 stack2reg(crb, masm, result, asAllocatableValue(input)); 415 } else { 416 throw GraalError.shouldNotReachHere(); 417 } 418 } else if (isJavaConstant(input)) { 419 if (isRegister(result)) { 420 const2reg(crb, masm, result, asJavaConstant(input)); 421 } else { 422 throw GraalError.shouldNotReachHere(); 423 } 424 } else { 425 throw GraalError.shouldNotReachHere(); 426 } 427 } 428 429 private static void reg2reg(@SuppressWarnings("unused") CompilationResultBuilder crb, AArch64MacroAssembler masm, AllocatableValue result, AllocatableValue input) { 430 Register dst = asRegister(result); 431 Register src = asRegister(input); 432 if (src.equals(dst)) { 
433 return; 434 } 435 AArch64Kind kind = (AArch64Kind) input.getPlatformKind(); 436 int size = kind.getSizeInBytes() * Byte.SIZE; 437 if (kind.isInteger()) { 438 masm.mov(size, dst, src); 439 } else { 440 masm.fmov(size, dst, src); 441 } 442 } 443 444 private static void reg2stack(CompilationResultBuilder crb, AArch64MacroAssembler masm, AllocatableValue result, AllocatableValue input) { 445 AArch64Address dest = loadStackSlotAddress(crb, masm, asStackSlot(result), Value.ILLEGAL); 446 Register src = asRegister(input); 447 AArch64Kind kind = (AArch64Kind) input.getPlatformKind(); 448 int size = kind.getSizeInBytes() * Byte.SIZE; 449 if (kind.isInteger()) { 450 masm.str(size, src, dest); 451 } else { 452 masm.fstr(size, src, dest); 453 } 454 } 455 456 private static void stack2reg(CompilationResultBuilder crb, AArch64MacroAssembler masm, AllocatableValue result, AllocatableValue input) { 457 AArch64Kind kind = (AArch64Kind) input.getPlatformKind(); 458 final int size = kind.getSizeInBytes() * Byte.SIZE; 459 if (kind.isInteger()) { 460 AArch64Address src = loadStackSlotAddress(crb, masm, asStackSlot(input), result); 461 masm.ldr(size, asRegister(result), src); 462 } else { 463 try (ScratchRegister sc = masm.getScratchRegister()) { 464 AllocatableValue scratchRegisterValue = sc.getRegister().asValue(LIRKind.combine(input)); 465 AArch64Address src = loadStackSlotAddress(crb, masm, asStackSlot(input), scratchRegisterValue); 466 masm.fldr(size, asRegister(result), src); 467 } 468 } 469 } 470 471 private static void const2reg(CompilationResultBuilder crb, AArch64MacroAssembler masm, Value result, JavaConstant input) { 472 Register dst = asRegister(result); 473 switch (input.getJavaKind().getStackKind()) { 474 case Int: 475 final int value = input.asInt(); 476 int maskedValue; 477 switch (input.getJavaKind()) { 478 case Boolean: 479 case Byte: 480 maskedValue = value & 0xFF; 481 break; 482 case Char: 483 case Short: 484 maskedValue = value & 0xFFFF; 485 break; 486 case Int: 
487 maskedValue = value; 488 break; 489 default: 490 throw GraalError.shouldNotReachHere(); 491 } 492 masm.mov(dst, maskedValue); 493 break; 494 case Long: 495 masm.mov(dst, input.asLong()); 496 break; 497 case Float: 498 if (AArch64MacroAssembler.isFloatImmediate(input.asFloat())) { 499 masm.fmov(32, dst, input.asFloat()); 500 } else { 501 masm.fldr(32, dst, (AArch64Address) crb.asFloatConstRef(input)); 502 } 503 break; 504 case Double: 505 if (AArch64MacroAssembler.isDoubleImmediate(input.asDouble())) { 506 masm.fmov(64, dst, input.asDouble()); 507 } else { 508 masm.fldr(64, dst, (AArch64Address) crb.asDoubleConstRef(input)); 509 } 510 break; 511 case Object: 512 if (input.isNull()) { 513 masm.mov(dst, 0); 514 } else if (crb.target.inlineObjects) { 515 crb.recordInlineDataInCode(input); 516 masm.movNativeAddress(dst, 0xDEADDEADDEADDEADL); 517 } else { 518 masm.ldr(64, dst, (AArch64Address) crb.recordDataReferenceInCode(input, 8)); 519 } 520 break; 521 default: 522 throw GraalError.shouldNotReachHere("kind=" + input.getJavaKind().getStackKind()); 523 } 524 } 525 526 private static void const2stack(CompilationResultBuilder crb, AArch64MacroAssembler masm, Value result, JavaConstant constant) { 527 if (constant.isDefaultForKind() || constant.isNull()) { 528 AArch64Address resultAddress = (AArch64Address) crb.asAddress(result); 529 emitStore(crb, masm, (AArch64Kind) result.getPlatformKind(), resultAddress, zr.asValue(LIRKind.combine(result))); 530 } else { 531 try (ScratchRegister sc = masm.getScratchRegister()) { 532 Value scratchRegisterValue = sc.getRegister().asValue(LIRKind.combine(result)); 533 const2reg(crb, masm, scratchRegisterValue, constant); 534 AArch64Address resultAddress = (AArch64Address) crb.asAddress(result); 535 emitStore(crb, masm, (AArch64Kind) result.getPlatformKind(), resultAddress, scratchRegisterValue); 536 } 537 } 538 } 539 540 /** 541 * Returns AArch64Address of given StackSlot. 
We cannot use CompilationResultBuilder.asAddress 542 * since this calls AArch64MacroAssembler.makeAddress with displacements that may be larger than 543 * 9-bit signed, which cannot be handled by that method. 544 * 545 * Instead we create an address ourselves. We use scaled unsigned addressing since we know the 546 * transfersize, which gives us a 15-bit address range (for longs/doubles) respectively a 14-bit 547 * range (for everything else). 548 * 549 * @param scratch Scratch register that can be used to load address. If Value.ILLEGAL this 550 * instruction fails if we try to access a StackSlot that is too large to be loaded 551 * directly. 552 * @return AArch64Address of given StackSlot. Uses scratch register if necessary to do so. 553 */ 554 private static AArch64Address loadStackSlotAddress(CompilationResultBuilder crb, AArch64MacroAssembler masm, StackSlot slot, AllocatableValue scratch) { 555 int displacement = crb.frameMap.offsetForStackSlot(slot); 556 int transferSize = slot.getPlatformKind().getSizeInBytes(); 557 Register scratchReg = Value.ILLEGAL.equals(scratch) ? zr : asRegister(scratch); 558 return masm.makeAddress(sp, displacement, scratchReg, transferSize, /* allowOverwrite */false); 559 } 560 561 }