1 /* 2 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 */

package org.graalvm.compiler.asm.aarch64;

import static org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode.BASE_REGISTER_ONLY;
import static org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode.EXTENDED_REGISTER_OFFSET;
import static org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode.IMMEDIATE_SCALED;
import static org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode.IMMEDIATE_UNSCALED;
import static org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode.REGISTER_OFFSET;
import static org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.AddressGenerationPlan.WorkPlan.ADD_TO_BASE;
import static org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.AddressGenerationPlan.WorkPlan.ADD_TO_INDEX;
import static org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.AddressGenerationPlan.WorkPlan.NO_WORK;

import org.graalvm.compiler.asm.BranchTargetOutOfBoundsException;

import static jdk.vm.ci.aarch64.AArch64.CPU;
import static jdk.vm.ci.aarch64.AArch64.r8;
import static jdk.vm.ci.aarch64.AArch64.r9;
import static jdk.vm.ci.aarch64.AArch64.sp;
import static jdk.vm.ci.aarch64.AArch64.zr;

import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.debug.GraalError;

import jdk.vm.ci.aarch64.AArch64;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.TargetDescription;

public class AArch64MacroAssembler extends AArch64Assembler {

    // Fixed pool of registers (r8, r9) this macro assembler may clobber for internal use.
    private final ScratchRegister[] scratchRegister = new ScratchRegister[]{new ScratchRegister(r8), new ScratchRegister(r9)};

    // Points to the next free scratch register
    private int nextFreeScratchRegister = 0;

    public AArch64MacroAssembler(TargetDescription target) {
        super(target);
    }

    /**
     * Handle to a scratch register. Closing it (e.g. via try-with-resources) releases the
     * register back to the pool; acquisitions and releases must be properly nested.
     */
    public class ScratchRegister implements AutoCloseable {
        private final Register register;

        public ScratchRegister(Register register) {
            this.register = register;
        }

        public Register getRegister() {
            return register;
        }

        @Override
        public void close() {
            assert nextFreeScratchRegister > 0 : "Close called too often";
            nextFreeScratchRegister--;
        }
    }

    public ScratchRegister getScratchRegister() {
        return scratchRegister[nextFreeScratchRegister++];
    }

    /**
     * Specifies what actions have to be taken to turn an arbitrary address of the form
     * {@code base + displacement [+ index [<< scale]]} into a valid AArch64Address.
     */
    public static class AddressGenerationPlan {
        public final WorkPlan workPlan;
        public final AArch64Address.AddressingMode addressingMode;
        public final boolean needsScratch;

        public enum WorkPlan {
            /**
             * Can be used as-is without extra work.
             */
            NO_WORK,
            /**
             * Add scaled displacement to index register.
             */
            ADD_TO_INDEX,
            /**
             * Add unscaled displacement to base register.
             */
            ADD_TO_BASE,
        }

        /**
         * @param workPlan Work necessary to generate a valid address.
         * @param addressingMode Addressing mode of generated address.
         * @param needsScratch True if generating address needs a scratch register, false otherwise.
         */
        public AddressGenerationPlan(WorkPlan workPlan, AArch64Address.AddressingMode addressingMode, boolean needsScratch) {
            this.workPlan = workPlan;
            this.addressingMode = addressingMode;
            this.needsScratch = needsScratch;
        }
    }

    /**
     * Generates an address plan for an address of the form
     * {@code base + displacement [+ index [<< log2(transferSize)]]} with the index register and
     * scaling being optional.
     *
     * @param displacement an arbitrary displacement.
     * @param hasIndexRegister true if the address uses an index register, false otherwise.
     * @param transferSize the memory transfer size in bytes. The log2 of this specifies how much
     *            the index register is scaled. If 0 no scaling is assumed. Can be 0, 1, 2, 4 or 8.
     * @return AddressGenerationPlan that specifies the actions necessary to generate a valid
     *         AArch64Address for the given parameters.
     */
    public static AddressGenerationPlan generateAddressPlan(long displacement, boolean hasIndexRegister, int transferSize) {
        assert transferSize == 0 || transferSize == 1 || transferSize == 2 || transferSize == 4 || transferSize == 8;
        boolean indexScaled = transferSize != 0;
        int log2Scale = NumUtil.log2Ceil(transferSize);
        long scaledDisplacement = displacement >> log2Scale;
        // The displacement can be folded into the scaled-immediate form only if it is a
        // multiple of the transfer size.
        boolean displacementScalable = indexScaled && (displacement & (transferSize - 1)) == 0;
        if (displacement == 0) {
            // register offset without any work beforehand.
            return new AddressGenerationPlan(NO_WORK, REGISTER_OFFSET, false);
        } else {
            if (hasIndexRegister) {
                if (displacementScalable) {
                    boolean needsScratch = !isArithmeticImmediate(scaledDisplacement);
                    return new AddressGenerationPlan(ADD_TO_INDEX, REGISTER_OFFSET, needsScratch);
                } else {
                    boolean needsScratch = !isArithmeticImmediate(displacement);
                    return new AddressGenerationPlan(ADD_TO_BASE, REGISTER_OFFSET, needsScratch);
                }
            } else {
                if (displacementScalable && NumUtil.isUnsignedNbit(12, scaledDisplacement)) {
                    return new AddressGenerationPlan(NO_WORK, IMMEDIATE_SCALED, false);
                } else if (NumUtil.isSignedNbit(9, displacement)) {
                    return new AddressGenerationPlan(NO_WORK, IMMEDIATE_UNSCALED, false);
                } else {
                    boolean needsScratch = !isArithmeticImmediate(displacement);
                    return new AddressGenerationPlan(ADD_TO_BASE, REGISTER_OFFSET, needsScratch);
                }
            }
        }
    }

    /**
     * Returns an AArch64Address pointing to
     * {@code base + displacement + index << log2(transferSize)}.
     *
     * @param base general purpose register. May not be null or the zero register.
     * @param displacement arbitrary displacement added to base.
     * @param index general purpose register. May not be null or the stack pointer.
     * @param signExtendIndex if true consider index register a word register that should be
     *            sign-extended before being added.
     * @param transferSize the memory transfer size in bytes. The log2 of this specifies how much
     *            the index register is scaled. If 0 no scaling is assumed. Can be 0, 1, 2, 4 or 8.
     * @param additionalReg additional register used either as a scratch register or as part of the
     *            final address, depending on whether allowOverwrite is true or not. May not be null
     *            or stackpointer.
     * @param allowOverwrite if true allows to change value of base or index register to generate
     *            address.
     * @return AArch64Address pointing to memory at
     *         {@code base + displacement + index << log2(transferSize)}.
     */
    public AArch64Address makeAddress(Register base, long displacement, Register index, boolean signExtendIndex, int transferSize, Register additionalReg, boolean allowOverwrite) {
        AddressGenerationPlan plan = generateAddressPlan(displacement, !index.equals(zr), transferSize);
        // If we may not overwrite base/index, any fix-up work must go through additionalReg.
        assert allowOverwrite || !zr.equals(additionalReg) || plan.workPlan == NO_WORK;
        assert !plan.needsScratch || !zr.equals(additionalReg);
        int log2Scale = NumUtil.log2Ceil(transferSize);
        long scaledDisplacement = displacement >> log2Scale;
        Register newIndex = index;
        Register newBase = base;
        int immediate;
        switch (plan.workPlan) {
            case NO_WORK:
                if (plan.addressingMode == IMMEDIATE_SCALED) {
                    immediate = (int) scaledDisplacement;
                } else {
                    immediate = (int) displacement;
                }
                break;
            case ADD_TO_INDEX:
                newIndex = allowOverwrite ? index : additionalReg;
                assert !newIndex.equals(sp) && !newIndex.equals(zr);
                if (plan.needsScratch) {
                    // Displacement does not fit an arithmetic immediate: materialize it first.
                    mov(additionalReg, scaledDisplacement);
                    add(signExtendIndex ? 32 : 64, newIndex, index, additionalReg);
                } else {
                    add(signExtendIndex ? 32 : 64, newIndex, index, (int) scaledDisplacement);
                }
                immediate = 0;
                break;
            case ADD_TO_BASE:
                newBase = allowOverwrite ? base : additionalReg;
                assert !newBase.equals(sp) && !newBase.equals(zr);
                if (plan.needsScratch) {
                    // Displacement does not fit an arithmetic immediate: materialize it first.
                    mov(additionalReg, displacement);
                    add(64, newBase, base, additionalReg);
                } else {
                    add(64, newBase, base, (int) displacement);
                }
                immediate = 0;
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        // Refine the addressing mode now that the effective index register is known.
        AArch64Address.AddressingMode addressingMode = plan.addressingMode;
        ExtendType extendType = null;
        if (addressingMode == REGISTER_OFFSET) {
            if (newIndex.equals(zr)) {
                addressingMode = BASE_REGISTER_ONLY;
            } else if (signExtendIndex) {
                addressingMode = EXTENDED_REGISTER_OFFSET;
                extendType = ExtendType.SXTW;
            }
        }
        return AArch64Address.createAddress(addressingMode, newBase, newIndex, immediate, transferSize != 0, extendType);
    }

    /**
     * Returns an AArch64Address pointing to {@code base + displacement}. Specifies the memory
     * transfer size to allow some optimizations when building the address.
     *
     * @param base general purpose register. May not be null or the zero register.
     * @param displacement arbitrary displacement added to base.
     * @param transferSize the memory transfer size in bytes.
     * @param additionalReg additional register used either as a scratch register or as part of the
     *            final address, depending on whether allowOverwrite is true or not. May not be
     *            null, zero register or stackpointer.
     * @param allowOverwrite if true allows to change value of base or index register to generate
     *            address.
     * @return AArch64Address pointing to memory at {@code base + displacement}.
     */
    public AArch64Address makeAddress(Register base, long displacement, Register additionalReg, int transferSize, boolean allowOverwrite) {
        assert additionalReg.getRegisterCategory().equals(CPU);
        return makeAddress(base, displacement, zr, /* sign-extend */false, transferSize, additionalReg, allowOverwrite);
    }

    /**
     * Returns an AArch64Address pointing to {@code base + displacement}. Fails if address cannot be
     * represented without overwriting base register or using a scratch register.
     *
     * @param base general purpose register. May not be null or the zero register.
     * @param displacement arbitrary displacement added to base.
     * @param transferSize the memory transfer size in bytes. The log2 of this specifies how much
     *            the index register is scaled. If 0 no scaling is assumed. Can be 0, 1, 2, 4 or 8.
     * @return AArch64Address pointing to memory at {@code base + displacement}.
     */
    public AArch64Address makeAddress(Register base, long displacement, int transferSize) {
        return makeAddress(base, displacement, zr, /* signExtend */false, //
                        transferSize, zr, /* allowOverwrite */false);
    }

    /**
     * Loads memory address into register.
     *
     * @param dst general purpose register. May not be null, zero-register or stackpointer.
     * @param address address whose value is loaded into dst. May not be null,
     *            {@link org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode#IMMEDIATE_POST_INDEXED
     *            POST_INDEXED} or
     *            {@link org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode#IMMEDIATE_PRE_INDEXED
     *            IMMEDIATE_PRE_INDEXED}
     * @param transferSize the memory transfer size in bytes. The log2 of this specifies how much
     *            the index register is scaled. Can be 1, 2, 4 or 8.
     */
    public void loadAddress(Register dst, AArch64Address address, int transferSize) {
        assert transferSize == 1 || transferSize == 2 || transferSize == 4 || transferSize == 8;
        assert dst.getRegisterCategory().equals(CPU);
        int shiftAmt = NumUtil.log2Ceil(transferSize);
        switch (address.getAddressingMode()) {
            case IMMEDIATE_SCALED:
                // Reconstruct the byte offset from the scaled immediate and add it to the base
                // in at most two 12-bit immediate adds (low 12 bits, then the shifted high bits).
                int scaledImmediate = address.getImmediateRaw() << shiftAmt;
                int lowerBits = scaledImmediate & NumUtil.getNbitNumberInt(12);
                int higherBits = scaledImmediate & ~NumUtil.getNbitNumberInt(12);
                boolean firstAdd = true;
                if (lowerBits != 0) {
                    add(64, dst, address.getBase(), lowerBits);
                    firstAdd = false;
                }
                if (higherBits != 0) {
                    Register src = firstAdd ? address.getBase() : dst;
                    add(64, dst, src, higherBits);
                }
                break;
            case IMMEDIATE_UNSCALED:
                int immediate = address.getImmediateRaw();
                add(64, dst, address.getBase(), immediate);
                break;
            case REGISTER_OFFSET:
                add(64, dst, address.getBase(), address.getOffset(), ShiftType.LSL, address.isScaled() ? shiftAmt : 0);
                break;
            case EXTENDED_REGISTER_OFFSET:
                add(64, dst, address.getBase(), address.getOffset(), address.getExtendType(), address.isScaled() ? shiftAmt : 0);
                break;
            case PC_LITERAL: {
                addressOf(dst);
                break;
            }
            case BASE_REGISTER_ONLY:
                movx(dst, address.getBase());
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** 64-bit register-to-register move. */
    public void movx(Register dst, Register src) {
        mov(64, dst, src);
    }

    /**
     * Register-to-register move; uses ADD when sp is involved because ORR cannot encode sp.
     */
    public void mov(int size, Register dst, Register src) {
        if (dst.equals(sp) || src.equals(sp)) {
            add(size, dst, src, 0);
        } else {
            or(size, dst, zr, src);
        }
    }

    /**
     * Generates a 64-bit immediate move code sequence.
     *
     * @param dst general purpose register. May not be null, stackpointer or zero-register.
     * @param imm the value to move into the register
     * @param annotateImm Flag denoting if annotation should be added.
     */
    private void mov64(Register dst, long imm, boolean annotateImm) {
        // We have to move all non zero parts of the immediate in 16-bit chunks
        int numMovs = 0;
        int pos = position();
        boolean firstMove = true;
        for (int offset = 0; offset < 64; offset += 16) {
            int chunk = (int) (imm >> offset) & NumUtil.getNbitNumberInt(16);
            if (chunk == 0) {
                continue;
            }
            if (firstMove) {
                // First chunk zeroes the rest of the register (MOVZ), later chunks keep it (MOVK).
                movz(64, dst, chunk, offset);
                firstMove = false;
            } else {
                movk(64, dst, chunk, offset);
            }
            ++numMovs;
        }
        // imm == 0 is handled by the caller, so at least one chunk must have been non-zero.
        assert !firstMove;
        if (annotateImm) {
            annotateImmediateMovSequence(pos, numMovs);
        }
    }

    /**
     * Loads immediate into register.
     *
     * @param dst general purpose register. May not be null, zero-register or stackpointer.
     * @param imm immediate loaded into register.
     */
    public void mov(Register dst, long imm) {
        mov(dst, imm, false);
    }

    /**
     * Loads immediate into register.
     *
     * @param dst general purpose register. May not be null, zero-register or stackpointer.
     * @param imm immediate loaded into register.
     * @param annotateImm Flag to signal if the immediate value should be annotated.
     */
    public void mov(Register dst, long imm, boolean annotateImm) {
        assert dst.getRegisterCategory().equals(CPU);
        if (imm == 0L) {
            movx(dst, zr);
        } else if (LogicalImmediateTable.isRepresentable(true, imm) != LogicalImmediateTable.Representable.NO) {
            or(64, dst, zr, imm);
        } else if (imm >> 32 == -1L && (int) imm < 0 && LogicalImmediateTable.isRepresentable((int) imm) != LogicalImmediateTable.Representable.NO) {
            // If the higher 32-bit are 1s and the sign bit of the lower 32-bits is set *and* we can
            // represent the lower 32 bits as a logical immediate we can create the lower 32-bit and
            // then sign extend
            // them. This allows us to cover immediates like ~1L with 2 instructions.
            mov(dst, (int) imm);
            sxt(64, 32, dst, dst);
        } else {
            mov64(dst, imm, annotateImm);
        }
    }

    /**
     * Loads immediate into register.
     *
     * @param dst general purpose register. May not be null, zero-register or stackpointer.
     * @param imm immediate loaded into register.
     */
    public void mov(Register dst, int imm) {
        // Mask to treat the int as an unsigned 32-bit value (zero-extension).
        mov(dst, imm & 0xFFFF_FFFFL);
    }

    /**
     * Generates a 48-bit immediate move code sequence. The immediate may later be updated by
     * HotSpot.
     *
     * In AArch64 mode the virtual address space is 48-bits in size, so we only need three
     * instructions to create a patchable instruction sequence that can reach anywhere.
     *
     * @param dst general purpose register. May not be null, stackpointer or zero-register.
     * @param imm
     */
    public void movNativeAddress(Register dst, long imm) {
        movNativeAddress(dst, imm, false);
    }

    /**
     * Generates a 48-bit immediate move code sequence. The immediate may later be updated by
     * HotSpot.
     *
     * In AArch64 mode the virtual address space is 48-bits in size, so we only need three
     * instructions to create a patchable instruction sequence that can reach anywhere.
     *
     * @param dst general purpose register. May not be null, stackpointer or zero-register.
     * @param imm The immediate address
     * @param annotateImm Flag to signal if the immediate value should be annotated.
     */
    public void movNativeAddress(Register dst, long imm, boolean annotateImm) {
        assert (imm & 0xFFFF_0000_0000_0000L) == 0;
        // We have to move all non zero parts of the immediate in 16-bit chunks
        boolean firstMove = true;
        int pos = position();
        for (int offset = 0; offset < 48; offset += 16) {
            int chunk = (int) (imm >> offset) & NumUtil.getNbitNumberInt(16);
            // Always emit all three moves (even for zero chunks) so the sequence is patchable.
            if (firstMove) {
                movz(64, dst, chunk, offset);
                firstMove = false;
            } else {
                movk(64, dst, chunk, offset);
            }
        }
        if (annotateImm) {
            annotateImmediateMovSequence(pos, 3);
        }
        assert !firstMove;
    }

    /**
     * Generates a 32-bit immediate move code sequence. The immediate may later be updated by
     * HotSpot.
     *
     * @param dst general purpose register. May not be null, stackpointer or zero-register.
     * @param imm
     */
    public void movNarrowAddress(Register dst, long imm) {
        assert (imm & 0xFFFF_FFFF_0000_0000L) == 0;
        movz(64, dst, (int) (imm >>> 16), 16);
        movk(64, dst, (int) (imm & 0xffff), 0);
    }

    /**
     * @return Number of instructions necessary to load immediate into register.
     */
    public static int nrInstructionsToMoveImmediate(long imm) {
        if (imm == 0L || LogicalImmediateTable.isRepresentable(true, imm) != LogicalImmediateTable.Representable.NO) {
            return 1;
        }
        if (imm >> 32 == -1L && (int) imm < 0 && LogicalImmediateTable.isRepresentable((int) imm) != LogicalImmediateTable.Representable.NO) {
            // If the higher 32-bit are 1s and the sign bit of the lower 32-bits is set *and* we can
            // represent the lower 32 bits as a logical immediate we can create the lower 32-bit and
            // then sign extend
            // them. This allows us to cover immediates like ~1L with 2 instructions.
            return 2;
        }
        // Otherwise: one movz/movk per non-zero 16-bit chunk (mirrors mov64 above).
        int nrInstructions = 0;
        for (int offset = 0; offset < 64; offset += 16) {
            int part = (int) (imm >> offset) & NumUtil.getNbitNumberInt(16);
            if (part != 0) {
                nrInstructions++;
            }
        }
        return nrInstructions;
    }

    /**
     * Loads a srcSize value from address into rt sign-extending it if necessary.
     *
     * @param targetSize size of target register in bits. Must be 32 or 64.
     * @param srcSize size of memory read in bits. Must be 8, 16 or 32 and smaller or equal to
     *            targetSize.
     * @param rt general purpose register. May not be null or stackpointer.
     * @param address all addressing modes allowed. May not be null.
     */
    @Override
    public void ldrs(int targetSize, int srcSize, Register rt, AArch64Address address) {
        assert targetSize == 32 || targetSize == 64;
        assert srcSize <= targetSize;
        if (targetSize == srcSize) {
            // Same width: no extension needed, a plain load suffices.
            super.ldr(srcSize, rt, address);
        } else {
            super.ldrs(targetSize, srcSize, rt, address);
        }
    }

    /**
     * Loads a srcSize value from address into rt zero-extending it if necessary.
     *
     * @param srcSize size of memory read in bits. Must be 8, 16 or 32 and smaller or equal to
     *            targetSize.
     * @param rt general purpose register. May not be null or stackpointer.
     * @param address all addressing modes allowed. May not be null.
     */
    @Override
    public void ldr(int srcSize, Register rt, AArch64Address address) {
        super.ldr(srcSize, rt, address);
    }

    /**
     * Conditional move. dst = src1 if condition else src2.
     *
     * @param size register size. Has to be 32 or 64.
     * @param result general purpose register. May not be null or the stackpointer.
     * @param trueValue general purpose register. May not be null or the stackpointer.
     * @param falseValue general purpose register. May not be null or the stackpointer.
536 * @param cond any condition flag. May not be null. 537 */ 538 public void cmov(int size, Register result, Register trueValue, Register falseValue, ConditionFlag cond) { 539 super.csel(size, result, trueValue, falseValue, cond); 540 } 541 542 /** 543 * Conditional set. dst = 1 if condition else 0. 544 * 545 * @param dst general purpose register. May not be null or stackpointer. 546 * @param condition any condition. May not be null. 547 */ 548 public void cset(int size, Register dst, ConditionFlag condition) { 549 super.csinc(size, dst, zr, zr, condition.negate()); 550 } 551 552 /** 553 * dst = src1 + src2. 554 * 555 * @param size register size. Has to be 32 or 64. 556 * @param dst general purpose register. May not be null. 557 * @param src1 general purpose register. May not be null. 558 * @param src2 general purpose register. May not be null or stackpointer. 559 */ 560 public void add(int size, Register dst, Register src1, Register src2) { 561 if (dst.equals(sp) || src1.equals(sp)) { 562 super.add(size, dst, src1, src2, ExtendType.UXTX, 0); 563 } else { 564 super.add(size, dst, src1, src2, ShiftType.LSL, 0); 565 } 566 } 567 568 /** 569 * dst = src1 + src2 and sets condition flags. 570 * 571 * @param size register size. Has to be 32 or 64. 572 * @param dst general purpose register. May not be null. 573 * @param src1 general purpose register. May not be null. 574 * @param src2 general purpose register. May not be null or stackpointer. 575 */ 576 public void adds(int size, Register dst, Register src1, Register src2) { 577 if (dst.equals(sp) || src1.equals(sp)) { 578 super.adds(size, dst, src1, src2, ExtendType.UXTX, 0); 579 } else { 580 super.adds(size, dst, src1, src2, ShiftType.LSL, 0); 581 } 582 } 583 584 /** 585 * dst = src1 - src2 and sets condition flags. 586 * 587 * @param size register size. Has to be 32 or 64. 588 * @param dst general purpose register. May not be null. 589 * @param src1 general purpose register. May not be null. 
590 * @param src2 general purpose register. May not be null or stackpointer. 591 */ 592 public void subs(int size, Register dst, Register src1, Register src2) { 593 if (dst.equals(sp) || src1.equals(sp)) { 594 super.subs(size, dst, src1, src2, ExtendType.UXTX, 0); 595 } else { 596 super.subs(size, dst, src1, src2, ShiftType.LSL, 0); 597 } 598 } 599 600 /** 601 * dst = src1 - src2. 602 * 603 * @param size register size. Has to be 32 or 64. 604 * @param dst general purpose register. May not be null. 605 * @param src1 general purpose register. May not be null. 606 * @param src2 general purpose register. May not be null or stackpointer. 607 */ 608 public void sub(int size, Register dst, Register src1, Register src2) { 609 if (dst.equals(sp) || src1.equals(sp)) { 610 super.sub(size, dst, src1, src2, ExtendType.UXTX, 0); 611 } else { 612 super.sub(size, dst, src1, src2, ShiftType.LSL, 0); 613 } 614 } 615 616 /** 617 * dst = src1 + shiftType(src2, shiftAmt & (size - 1)). 618 * 619 * @param size register size. Has to be 32 or 64. 620 * @param dst general purpose register. May not be null or stackpointer. 621 * @param src1 general purpose register. May not be null or stackpointer. 622 * @param src2 general purpose register. May not be null or stackpointer. 623 * @param shiftType any type but ROR. 624 * @param shiftAmt arbitrary shift amount. 625 */ 626 @Override 627 public void add(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) { 628 int shift = clampShiftAmt(size, shiftAmt); 629 super.add(size, dst, src1, src2, shiftType, shift); 630 } 631 632 /** 633 * dst = src1 + shiftType(src2, shiftAmt & (size-1)) and sets condition flags. 634 * 635 * @param size register size. Has to be 32 or 64. 636 * @param dst general purpose register. May not be null or stackpointer. 637 * @param src1 general purpose register. May not be null or stackpointer. 638 * @param src2 general purpose register. May not be null or stackpointer. 
639 * @param shiftType any type but ROR. 640 * @param shiftAmt arbitrary shift amount. 641 */ 642 @Override 643 public void sub(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) { 644 int shift = clampShiftAmt(size, shiftAmt); 645 super.sub(size, dst, src1, src2, shiftType, shift); 646 } 647 648 /** 649 * dst = -src1. 650 * 651 * @param size register size. Has to be 32 or 64. 652 * @param dst general purpose register. May not be null or stackpointer. 653 * @param src general purpose register. May not be null or stackpointer. 654 */ 655 public void neg(int size, Register dst, Register src) { 656 sub(size, dst, zr, src); 657 } 658 659 /** 660 * dst = src + immediate. 661 * 662 * @param size register size. Has to be 32 or 64. 663 * @param dst general purpose register. May not be null or zero-register. 664 * @param src general purpose register. May not be null or zero-register. 665 * @param immediate 32-bit signed int 666 */ 667 @Override 668 public void add(int size, Register dst, Register src, int immediate) { 669 assert (!dst.equals(zr) && !src.equals(zr)); 670 if (immediate < 0) { 671 sub(size, dst, src, -immediate); 672 } else if (isAimm(immediate)) { 673 if (!(dst.equals(src) && immediate == 0)) { 674 super.add(size, dst, src, immediate); 675 } 676 } else if (immediate >= -(1 << 24) && immediate < (1 << 24)) { 677 super.add(size, dst, src, immediate & -(1 << 12)); 678 super.add(size, dst, dst, immediate & ((1 << 12) - 1)); 679 } else { 680 assert !dst.equals(src); 681 mov(dst, immediate); 682 add(size, src, dst, dst); 683 } 684 } 685 686 /** 687 * dst = src + immediate. 688 * 689 * @param size register size. Has to be 32 or 64. 690 * @param dst general purpose register. May not be null or zero-register. 691 * @param src general purpose register. May not be null or zero-register. 
692 * @param immediate 64-bit signed int 693 */ 694 public void add(int size, Register dst, Register src, long immediate) { 695 if (NumUtil.isInt(immediate)) { 696 add(size, dst, src, (int) immediate); 697 } else { 698 assert (!dst.equals(zr) && !src.equals(zr)); 699 assert !dst.equals(src); 700 assert size == 64; 701 mov(dst, immediate); 702 add(size, src, dst, dst); 703 } 704 } 705 706 /** 707 * dst = src + aimm and sets condition flags. 708 * 709 * @param size register size. Has to be 32 or 64. 710 * @param dst general purpose register. May not be null or stackpointer. 711 * @param src general purpose register. May not be null or zero-register. 712 * @param immediate arithmetic immediate. 713 */ 714 @Override 715 public void adds(int size, Register dst, Register src, int immediate) { 716 assert (!dst.equals(sp) && !src.equals(zr)); 717 if (immediate < 0) { 718 subs(size, dst, src, -immediate); 719 } else if (!(dst.equals(src) && immediate == 0)) { 720 super.adds(size, dst, src, immediate); 721 } 722 } 723 724 /** 725 * dst = src - immediate. 726 * 727 * @param size register size. Has to be 32 or 64. 728 * @param dst general purpose register. May not be null or zero-register. 729 * @param src general purpose register. May not be null or zero-register. 
730 * @param immediate 32-bit signed int 731 */ 732 @Override 733 public void sub(int size, Register dst, Register src, int immediate) { 734 assert (!dst.equals(zr) && !src.equals(zr)); 735 if (immediate < 0) { 736 add(size, dst, src, -immediate); 737 } else if (isAimm(immediate)) { 738 if (!(dst.equals(src) && immediate == 0)) { 739 super.sub(size, dst, src, immediate); 740 } 741 } else if (immediate >= -(1 << 24) && immediate < (1 << 24)) { 742 super.sub(size, dst, src, immediate & -(1 << 12)); 743 super.sub(size, dst, dst, immediate & ((1 << 12) - 1)); 744 } else { 745 assert !dst.equals(src); 746 mov(dst, immediate); 747 sub(size, src, dst, dst); 748 } 749 } 750 751 /** 752 * dst = src - aimm and sets condition flags. 753 * 754 * @param size register size. Has to be 32 or 64. 755 * @param dst general purpose register. May not be null or stackpointer. 756 * @param src general purpose register. May not be null or zero-register. 757 * @param immediate arithmetic immediate. 758 */ 759 @Override 760 public void subs(int size, Register dst, Register src, int immediate) { 761 assert (!dst.equals(sp) && !src.equals(zr)); 762 if (immediate < 0) { 763 adds(size, dst, src, -immediate); 764 } else if (!dst.equals(src) || immediate != 0) { 765 super.subs(size, dst, src, immediate); 766 } 767 } 768 769 /** 770 * dst = src1 * src2. 771 * 772 * @param size register size. Has to be 32 or 64. 773 * @param dst general purpose register. May not be null or the stackpointer. 774 * @param src1 general purpose register. May not be null or the stackpointer. 775 * @param src2 general purpose register. May not be null or the stackpointer. 776 */ 777 public void mul(int size, Register dst, Register src1, Register src2) { 778 super.madd(size, dst, src1, src2, zr); 779 } 780 781 /** 782 * dst = src3 + src1 * src2. 783 * 784 * @param size register size. Has to be 32 or 64. 785 * @param dst general purpose register. May not be null or the stackpointer. 
786 * @param src1 general purpose register. May not be null or the stackpointer. 787 * @param src2 general purpose register. May not be null or the stackpointer. 788 * @param src3 general purpose register. May not be null or the stackpointer. 789 */ 790 @Override 791 public void madd(int size, Register dst, Register src1, Register src2, Register src3) { 792 super.madd(size, dst, src1, src2, src3); 793 } 794 795 /** 796 * dst = src3 - src1 * src2. 797 * 798 * @param size register size. Has to be 32 or 64. 799 * @param dst general purpose register. May not be null or the stackpointer. 800 * @param src1 general purpose register. May not be null or the stackpointer. 801 * @param src2 general purpose register. May not be null or the stackpointer. 802 * @param src3 general purpose register. May not be null or the stackpointer. 803 */ 804 @Override 805 public void msub(int size, Register dst, Register src1, Register src2, Register src3) { 806 super.msub(size, dst, src1, src2, src3); 807 } 808 809 /** 810 * dst = 0 - src1 * src2. 811 * 812 * @param size register size. Has to be 32 or 64. 813 * @param dst general purpose register. May not be null or the stackpointer. 814 * @param src1 general purpose register. May not be null or the stackpointer. 815 * @param src2 general purpose register. May not be null or the stackpointer. 816 */ 817 public void mneg(int size, Register dst, Register src1, Register src2) { 818 super.msub(size, dst, src1, src2, zr); 819 } 820 821 /** 822 * unsigned multiply high. dst = (src1 * src2) >> size 823 * 824 * @param size register size. Has to be 32 or 64. 825 * @param dst general purpose register. May not be null or the stackpointer. 826 * @param src1 general purpose register. May not be null or the stackpointer. 827 * @param src2 general purpose register. May not be null or the stackpointer. 
828 */ 829 public void umulh(int size, Register dst, Register src1, Register src2) { 830 assert (!dst.equals(sp) && !src1.equals(sp) && !src2.equals(sp)); 831 assert size == 32 || size == 64; 832 if (size == 64) { 833 super.umulh(dst, src1, src2); 834 } else { 835 // xDst = wSrc1 * wSrc2 836 super.umaddl(dst, src1, src2, zr); 837 // xDst = xDst >> 32 838 lshr(64, dst, dst, 32); 839 } 840 } 841 842 /** 843 * signed multiply high. dst = (src1 * src2) >> size 844 * 845 * @param size register size. Has to be 32 or 64. 846 * @param dst general purpose register. May not be null or the stackpointer. 847 * @param src1 general purpose register. May not be null or the stackpointer. 848 * @param src2 general purpose register. May not be null or the stackpointer. 849 */ 850 public void smulh(int size, Register dst, Register src1, Register src2) { 851 assert (!dst.equals(sp) && !src1.equals(sp) && !src2.equals(sp)); 852 assert size == 32 || size == 64; 853 if (size == 64) { 854 super.smulh(dst, src1, src2); 855 } else { 856 // xDst = wSrc1 * wSrc2 857 super.smaddl(dst, src1, src2, zr); 858 // xDst = xDst >> 32 859 lshr(64, dst, dst, 32); 860 } 861 } 862 863 /** 864 * dst = src1 % src2. Signed. 865 * 866 * @param size register size. Has to be 32 or 64. 867 * @param dst general purpose register. May not be null or the stackpointer. 868 * @param n numerator. General purpose register. May not be null or the stackpointer. 869 * @param d denominator. General purpose register. Divisor May not be null or the stackpointer. 870 */ 871 public void rem(int size, Register dst, Register n, Register d) { 872 assert (!dst.equals(sp) && !n.equals(sp) && !d.equals(sp)); 873 // There is no irem or similar instruction. 
Instead we use the relation: 874 // n % d = n - Floor(n / d) * d if nd >= 0 875 // n % d = n - Ceil(n / d) * d else 876 // Which is equivalent to n - TruncatingDivision(n, d) * d 877 super.sdiv(size, dst, n, d); 878 super.msub(size, dst, dst, d, n); 879 } 880 881 /** 882 * dst = src1 % src2. Unsigned. 883 * 884 * @param size register size. Has to be 32 or 64. 885 * @param dst general purpose register. May not be null or the stackpointer. 886 * @param n numerator. General purpose register. May not be null or the stackpointer. 887 * @param d denominator. General purpose register. Divisor May not be null or the stackpointer. 888 */ 889 public void urem(int size, Register dst, Register n, Register d) { 890 // There is no irem or similar instruction. Instead we use the relation: 891 // n % d = n - Floor(n / d) * d 892 // Which is equivalent to n - TruncatingDivision(n, d) * d 893 super.udiv(size, dst, n, d); 894 super.msub(size, dst, dst, d, n); 895 } 896 897 /** 898 * Add/subtract instruction encoding supports 12-bit immediate values. 899 * 900 * @param imm immediate value to be tested. 901 * @return true if immediate can be used directly for arithmetic instructions (add/sub), false 902 * otherwise. 903 */ 904 public static boolean isArithmeticImmediate(long imm) { 905 // If we have a negative immediate we just use the opposite operator. I.e.: x - (-5) == x + 906 // 5. 907 return NumUtil.isInt(Math.abs(imm)) && isAimm((int) Math.abs(imm)); 908 } 909 910 /** 911 * Compare instructions are add/subtract instructions and so support 12-bit immediate values. 912 * 913 * @param imm immediate value to be tested. 914 * @return true if immediate can be used directly with comparison instructions, false otherwise. 915 */ 916 public static boolean isComparisonImmediate(long imm) { 917 return isArithmeticImmediate(imm); 918 } 919 920 /** 921 * Move wide immediate instruction encoding supports 16-bit immediate values which can be 922 * optionally-shifted by multiples of 16 (i.e. 
     * 0, 16, 32, 48).
     *
     * @return true if immediate can be moved directly into a register, false otherwise.
     */
    public static boolean isMovableImmediate(long imm) {
        /*
         * This is a bit optimistic because the constant could also be for an arithmetic instruction
         * which only supports 12-bits. That case needs to be handled in the backend.
         */
        // NOTE(review): a stricter check (16 set bits at a 16-aligned shiftable position) was
        // previously considered here; this version only tests that the magnitude fits in an
        // unsigned 16-bit value.
        return NumUtil.isInt(Math.abs(imm)) && NumUtil.isUnsignedNbit(16, (int) Math.abs(imm));
    }

    /**
     * dst = src << (shiftAmt & (size - 1)).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null, stackpointer or zero-register.
     * @param src general purpose register. May not be null, stackpointer or zero-register.
     * @param shiftAmt amount by which src is shifted.
     */
    public void shl(int size, Register dst, Register src, long shiftAmt) {
        int shift = clampShiftAmt(size, shiftAmt);
        // LSL is an alias of UBFM with these rotated/truncated bitfield bounds.
        super.ubfm(size, dst, src, (size - shift) & (size - 1), size - 1 - shift);
    }

    /**
     * dst = src << (shift & (size - 1)).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src general purpose register. May not be null or stackpointer.
     * @param shift general purpose register. May not be null or stackpointer.
     */
    public void shl(int size, Register dst, Register src, Register shift) {
        super.lsl(size, dst, src, shift);
    }

    /**
     * dst = src >>> (shiftAmt & (size - 1)).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null, stackpointer or zero-register.
     * @param src general purpose register. May not be null, stackpointer or zero-register.
     * @param shiftAmt amount by which src is shifted.
     */
    public void lshr(int size, Register dst, Register src, long shiftAmt) {
        int shift = clampShiftAmt(size, shiftAmt);
        // LSR is an alias of UBFM with immr = shift, imms = size - 1.
        super.ubfm(size, dst, src, shift, size - 1);
    }

    /**
     * dst = src >>> (shift & (size - 1)).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src general purpose register. May not be null or stackpointer.
     * @param shift general purpose register. May not be null or stackpointer.
     */
    public void lshr(int size, Register dst, Register src, Register shift) {
        super.lsr(size, dst, src, shift);
    }

    /**
     * dst = src >> (shiftAmt & (size - 1)).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null, stackpointer or zero-register.
     * @param src general purpose register. May not be null, stackpointer or zero-register.
     * @param shiftAmt amount by which src is shifted.
     */
    public void ashr(int size, Register dst, Register src, long shiftAmt) {
        int shift = clampShiftAmt(size, shiftAmt);
        // ASR is an alias of SBFM with immr = shift, imms = size - 1.
        super.sbfm(size, dst, src, shift, size - 1);
    }

    /**
     * dst = src >> (shift & (size - 1)).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src general purpose register. May not be null or stackpointer.
     * @param shift general purpose register. May not be null or stackpointer.
     */
    public void ashr(int size, Register dst, Register src, Register shift) {
        super.asr(size, dst, src, shift);
    }

    /**
     * Clamps shiftAmt into range 0 <= shiftamt < size according to JLS.
     *
     * @param size size of operation.
     * @param shiftAmt arbitrary shift amount.
     * @return value between 0 and size - 1 inclusive that is equivalent to shiftAmt according to
     *         JLS.
     */
    private static int clampShiftAmt(int size, long shiftAmt) {
        // size is a power of two (32 or 64), so masking with size - 1 is shiftAmt mod size.
        return (int) (shiftAmt & (size - 1));
    }

    /**
     * dst = src1 & src2.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src1 general purpose register. May not be null or stackpointer.
     * @param src2 general purpose register. May not be null or stackpointer.
     */
    public void and(int size, Register dst, Register src1, Register src2) {
        super.and(size, dst, src1, src2, ShiftType.LSL, 0);
    }

    /**
     * dst = src1 ^ src2.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src1 general purpose register. May not be null or stackpointer.
     * @param src2 general purpose register. May not be null or stackpointer.
     */
    public void eor(int size, Register dst, Register src1, Register src2) {
        super.eor(size, dst, src1, src2, ShiftType.LSL, 0);
    }

    /**
     * dst = src1 | src2.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src1 general purpose register. May not be null or stackpointer.
     * @param src2 general purpose register. May not be null or stackpointer.
     */
    public void or(int size, Register dst, Register src1, Register src2) {
        super.orr(size, dst, src1, src2, ShiftType.LSL, 0);
    }

    /**
     * dst = src | bimm.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or zero-register.
     * @param src general purpose register. May not be null or stack-pointer.
     * @param bimm logical immediate. See {@link AArch64Assembler.LogicalImmediateTable} for exact
     *            definition.
     */
    public void or(int size, Register dst, Register src, long bimm) {
        super.orr(size, dst, src, bimm);
    }

    /**
     * dst = ~src.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src general purpose register. May not be null or stackpointer.
     */
    public void not(int size, Register dst, Register src) {
        // NOT is ORN with the zero register as first operand: dst = zr | ~src.
        super.orn(size, dst, zr, src, ShiftType.LSL, 0);
    }

    /**
     * dst = src1 & shiftType(src2, imm).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src1 general purpose register. May not be null or stackpointer.
     * @param src2 general purpose register. May not be null or stackpointer.
     * @param shiftType all types allowed, may not be null.
     * @param shiftAmt must be in range 0 to size - 1.
     */
    @Override
    public void and(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
        super.and(size, dst, src1, src2, shiftType, shiftAmt);
    }

    /**
     * dst = src1 ^ shiftType(src2, imm).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src1 general purpose register. May not be null or stackpointer.
     * @param src2 general purpose register. May not be null or stackpointer.
     * @param shiftType all types allowed, may not be null.
     * @param shiftAmt must be in range 0 to size - 1.
     */
    @Override
    public void eor(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
        super.eor(size, dst, src1, src2, shiftType, shiftAmt);
    }

    /**
     * dst = src1 | shiftType(src2, imm).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src1 general purpose register. May not be null or stackpointer.
     * @param src2 general purpose register. May not be null or stackpointer.
     * @param shiftType all types allowed, may not be null.
     * @param shiftAmt must be in range 0 to size - 1.
     */
    public void or(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
        super.orr(size, dst, src1, src2, shiftType, shiftAmt);
    }

    /**
     * dst = src1 & ~(shiftType(src2, imm)).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src1 general purpose register. May not be null or stackpointer.
     * @param src2 general purpose register. May not be null or stackpointer.
     * @param shiftType all types allowed, may not be null.
     * @param shiftAmt must be in range 0 to size - 1.
     */
    @Override
    public void bic(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
        super.bic(size, dst, src1, src2, shiftType, shiftAmt);
    }

    /**
     * dst = src1 ^ ~(shiftType(src2, imm)).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src1 general purpose register. May not be null or stackpointer.
     * @param src2 general purpose register. May not be null or stackpointer.
     * @param shiftType all types allowed, may not be null.
     * @param shiftAmt must be in range 0 to size - 1.
     */
    @Override
    public void eon(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
        super.eon(size, dst, src1, src2, shiftType, shiftAmt);
    }

    /**
     * dst = src1 | ~(shiftType(src2, imm)).
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stackpointer.
     * @param src1 general purpose register. May not be null or stackpointer.
     * @param src2 general purpose register. May not be null or stackpointer.
     * @param shiftType all types allowed, may not be null.
     * @param shiftAmt must be in range 0 to size - 1.
     */
    @Override
    public void orn(int size, Register dst, Register src1, Register src2, ShiftType shiftType, int shiftAmt) {
        super.orn(size, dst, src1, src2, shiftType, shiftAmt);
    }

    /**
     * Sign-extend value from src into dst.
     *
     * @param destSize destination register size. Must be 32 or 64.
     * @param srcSize source register size. Must be smaller than destSize.
     * @param dst general purpose register. May not be null, stackpointer or zero-register.
     * @param src general purpose register. May not be null, stackpointer or zero-register.
     */
    public void sxt(int destSize, int srcSize, Register dst, Register src) {
        assert (srcSize < destSize && srcSize > 0);
        // SBFM copying bits 0..srcSize-1 sign-extends them to the full destination width.
        super.sbfm(destSize, dst, src, 0, srcSize - 1);
    }

    /**
     * dst = src if condition else -src.
     *
     * @param size register size. Must be 32 or 64.
     * @param dst general purpose register. May not be null or the stackpointer.
     * @param src general purpose register. May not be null or the stackpointer.
     * @param condition any condition except AV or NV. May not be null.
     */
    public void csneg(int size, Register dst, Register src, ConditionFlag condition) {
        // CSNEG selects src when the *negated* condition holds, else -src; negating the
        // condition here yields the documented "src if condition else -src" behavior.
        super.csneg(size, dst, src, src, condition.negate());
    }

    /**
     * @return True if the immediate can be used directly for logical 64-bit instructions.
     */
    public static boolean isLogicalImmediate(long imm) {
        // NOTE(review): accepts any representability other than NO (the 32-bit overload below
        // requires exactly YES) -- presumably OUTSIDE_INT_RANGE is fine for 64-bit ops; confirm
        // against LogicalImmediateTable.
        return LogicalImmediateTable.isRepresentable(true, imm) != LogicalImmediateTable.Representable.NO;
    }

    /**
     * @return True if the immediate can be used directly for logical 32-bit instructions.
     */
    public static boolean isLogicalImmediate(int imm) {
        return LogicalImmediateTable.isRepresentable(imm) == LogicalImmediateTable.Representable.YES;
    }

    /* Float instructions */

    /**
     * Moves integer to float, float to integer, or float to float. Does not support integer to
     * integer moves.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst Either floating-point or general-purpose register. If general-purpose register may
     *            not be stackpointer or zero register. Cannot be null in any case.
     * @param src Either floating-point or general-purpose register. If general-purpose register may
     *            not be stackpointer. Cannot be null in any case.
     */
    @Override
    public void fmov(int size, Register dst, Register src) {
        assert !(dst.getRegisterCategory().equals(CPU) && src.getRegisterCategory().equals(CPU)) : "src and dst cannot both be integer registers.";
        // Dispatch on which side is the general-purpose register.
        if (dst.getRegisterCategory().equals(CPU)) {
            super.fmovFpu2Cpu(size, dst, src);
        } else if (src.getRegisterCategory().equals(CPU)) {
            super.fmovCpu2Fpu(size, dst, src);
        } else {
            super.fmov(size, dst, src);
        }
    }

    /**
     * Loads a floating-point immediate into dst.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst floating point register. May not be null.
     * @param imm immediate that is loaded into dst. If size is 32 only float immediates can be
     *            loaded, i.e. (float) imm == imm must be true. In all cases
     *            {@code isFloatImmediate}, respectively {@code #isDoubleImmediate} must be true
     *            depending on size.
     */
    @Override
    public void fmov(int size, Register dst, double imm) {
        if (imm == 0.0) {
            // +0.0 is not encodable as an FMOV immediate; materialize it from the zero register.
            // (-0.0 also compares == 0.0, hence the raw-bits assertion.)
            assert Double.doubleToRawLongBits(imm) == 0L : "-0.0 is no valid immediate.";
            super.fmovCpu2Fpu(size, dst, zr);
        } else {
            super.fmov(size, dst, imm);
        }
    }

    /**
     * @return true if immediate can be loaded directly into floating-point register, false
     *         otherwise.
     */
    public static boolean isDoubleImmediate(double imm) {
        // +0.0 is handled specially in fmov via the zero register, so it counts as loadable.
        return Double.doubleToRawLongBits(imm) == 0L || AArch64Assembler.isDoubleImmediate(imm);
    }

    /**
     * @return true if immediate can be loaded directly into floating-point register, false
     *         otherwise.
     */
    public static boolean isFloatImmediate(float imm) {
        // +0.0f is handled specially in fmov via the zero register, so it counts as loadable.
        return Float.floatToRawIntBits(imm) == 0 || AArch64Assembler.isFloatImmediate(imm);
    }

    /**
     * Conditional move. dst = src1 if condition else src2.
     *
     * @param size register size.
     * @param result floating point register. May not be null.
     * @param trueValue floating point register. May not be null.
     * @param falseValue floating point register. May not be null.
     * @param condition every condition allowed. May not be null.
     */
    public void fcmov(int size, Register result, Register trueValue, Register falseValue, ConditionFlag condition) {
        super.fcsel(size, result, trueValue, falseValue, condition);
    }

    /**
     * dst = src1 % src2.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst floating-point register. May not be null.
     * @param n numerator. Floating-point register. May not be null.
     * @param d denominator. Floating-point register. May not be null.
     */
    public void frem(int size, Register dst, Register n, Register d) {
        // There is no frem instruction, instead we compute the remainder using the relation:
        // rem = n - Truncating(n / d) * d
        super.fdiv(size, dst, n, d);
        super.frintz(size, dst, dst);
        super.fmsub(size, dst, dst, d, n);
    }

    /* Branches */

    /**
     * Compares x and y and sets condition flags.
     *
     * @param size register size. Has to be 32 or 64.
     * @param x general purpose register. May not be null or stackpointer.
     * @param y general purpose register. May not be null or stackpointer.
     */
    public void cmp(int size, Register x, Register y) {
        assert size == 32 || size == 64;
        // CMP is SUBS discarding the result into the zero register.
        super.subs(size, zr, x, y, ShiftType.LSL, 0);
    }

    /**
     * Compares x to y and sets condition flags.
     *
     * @param size register size. Has to be 32 or 64.
     * @param x general purpose register. May not be null or stackpointer.
     * @param y comparison immediate, {@link #isComparisonImmediate(long)} has to be true for it.
     */
    public void cmp(int size, Register x, int y) {
        assert size == 32 || size == 64;
        // Negative immediates are compared via the flipped operation: x - (-y) == x + y.
        // NOTE(review): y == Integer.MIN_VALUE negates to itself -- presumably excluded by the
        // isComparisonImmediate precondition; confirm.
        if (y < 0) {
            super.adds(size, zr, x, -y);
        } else {
            super.subs(size, zr, x, y);
        }
    }

    /**
     * Sets condition flags according to result of x & y.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stack-pointer.
     * @param x general purpose register. May not be null or stackpointer.
     * @param y general purpose register. May not be null or stackpointer.
     */
    public void ands(int size, Register dst, Register x, Register y) {
        super.ands(size, dst, x, y, ShiftType.LSL, 0);
    }

    /**
     * Sets overflow flag according to result of x * y.
     *
     * @param size register size. Has to be 32 or 64.
     * @param dst general purpose register. May not be null or stack-pointer.
     * @param x general purpose register. May not be null or stackpointer.
     * @param y general purpose register. May not be null or stackpointer.
     */
    public void mulvs(int size, Register dst, Register x, Register y) {
        try (ScratchRegister sc1 = getScratchRegister();
                        ScratchRegister sc2 = getScratchRegister()) {
            switch (size) {
                case 64: {
                    // Be careful with registers: it's possible that x, y, and dst are the same
                    // register.
                    Register rscratch1 = sc1.getRegister();
                    Register rscratch2 = sc2.getRegister();
                    mul(64, rscratch1, x, y); // Result bits 0..63
                    smulh(64, rscratch2, x, y); // Result bits 64..127
                    // Overflow iff the high half is not the pure sign extension of the low half.
                    // Top is pure sign ext
                    subs(64, zr, rscratch2, rscratch1, ShiftType.ASR, 63);
                    // Copy all 64 bits of the result into dst
                    mov(64, dst, rscratch1);
                    mov(rscratch1, 0x80000000);
                    // Develop 0 (EQ), or 0x80000000 (NE)
                    cmov(32, rscratch1, rscratch1, zr, ConditionFlag.NE);
                    cmp(32, rscratch1, 1);
                    // 0x80000000 - 1 => VS
                    break;
                }
                case 32: {
                    Register rscratch1 = sc1.getRegister();
                    // Full 64-bit signed product of the 32-bit inputs.
                    smaddl(rscratch1, x, y, zr);
                    // Copy the low 32 bits of the result into dst
                    mov(32, dst, rscratch1);
                    // Overflow iff the 64-bit product differs from its own 32->64 sign extension.
                    subs(64, zr, rscratch1, rscratch1, ExtendType.SXTW, 0);
                    // NE => overflow
                    mov(rscratch1, 0x80000000);
                    // Develop 0 (EQ), or 0x80000000 (NE)
                    cmov(32, rscratch1, rscratch1, zr, ConditionFlag.NE);
                    cmp(32, rscratch1, 1);
                    // 0x80000000 - 1 => VS
                    break;
                }
            }
        }
    }

    /**
     * When patching up Labels we have to know what kind of code to generate.
     */
    public enum PatchLabelKind {
        BRANCH_CONDITIONALLY(0x0),
        BRANCH_UNCONDITIONALLY(0x1),
        BRANCH_NONZERO(0x2),
        BRANCH_ZERO(0x3),
        BRANCH_BIT_NONZERO(0x4),
        BRANCH_BIT_ZERO(0x5),
        JUMP_ADDRESS(0x6),
        ADR(0x7);

        /**
         * Offset by which additional information for branch conditionally, branch zero and branch
         * non zero has to be shifted.
         */
        public static final int INFORMATION_OFFSET = 5;

        public final int encoding;

        PatchLabelKind(int encoding) {
            this.encoding = encoding;
        }

        /**
         * @return PatchLabelKind with given encoding.
         */
        private static PatchLabelKind fromEncoding(int encoding) {
            // NOTE(review): the 5-bit mask admits values 8..31 which have no enum constant and
            // would throw ArrayIndexOutOfBoundsException -- presumably unreachable because only
            // placeholders emitted by this class are ever decoded; confirm.
            return values()[encoding & NumUtil.getNbitNumberInt(INFORMATION_OFFSET)];
        }

    }

    /**
     * Emits an adr instruction targeting the label, or a patchable placeholder if the label is
     * still unbound.
     *
     * @param dst general purpose register receiving the computed address.
     * @param label may be unbound. Non null.
     */
    public void adr(Register dst, Label label) {
        // TODO Handle case where offset is too large for a single jump instruction
        if (label.isBound()) {
            int offset = label.position() - position();
            super.adr(dst, offset);
        } else {
            label.addPatchAt(position(), this);
            // Encode condition flag so that we know how to patch the instruction later
            emitInt(PatchLabelKind.ADR.encoding | dst.encoding << PatchLabelKind.INFORMATION_OFFSET);
        }
    }

    /**
     * Compare register and branch if non-zero.
     *
     * @param size Instruction size in bits. Should be either 32 or 64.
     * @param cmp general purpose register. May not be null, zero-register or stackpointer.
     * @param label Can only handle 21-bit word-aligned offsets for now. May be unbound. Non null.
     */
    public void cbnz(int size, Register cmp, Label label) {
        // TODO Handle case where offset is too large for a single jump instruction
        if (label.isBound()) {
            int offset = label.position() - position();
            super.cbnz(size, cmp, offset);
        } else {
            label.addPatchAt(position(), this);
            // Placeholder layout: [reg | size bit | kind], decoded again in patchJumpTarget.
            int regEncoding = cmp.encoding << (PatchLabelKind.INFORMATION_OFFSET + 1);
            int sizeEncoding = (size == 64 ? 1 : 0) << PatchLabelKind.INFORMATION_OFFSET;
            // Encode condition flag so that we know how to patch the instruction later
            emitInt(PatchLabelKind.BRANCH_NONZERO.encoding | regEncoding | sizeEncoding);
        }
    }

    /**
     * Compare register and branch if zero.
     *
     * @param size Instruction size in bits. Should be either 32 or 64.
     * @param cmp general purpose register. May not be null, zero-register or stackpointer.
     * @param label Can only handle 21-bit word-aligned offsets for now. May be unbound. Non null.
     */
    public void cbz(int size, Register cmp, Label label) {
        // TODO Handle case where offset is too large for a single jump instruction
        if (label.isBound()) {
            int offset = label.position() - position();
            super.cbz(size, cmp, offset);
        } else {
            label.addPatchAt(position(), this);
            // Placeholder layout: [reg | size bit | kind], decoded again in patchJumpTarget.
            int regEncoding = cmp.encoding << (PatchLabelKind.INFORMATION_OFFSET + 1);
            int sizeEncoding = (size == 64 ? 1 : 0) << PatchLabelKind.INFORMATION_OFFSET;
            // Encode condition flag so that we know how to patch the instruction later
            emitInt(PatchLabelKind.BRANCH_ZERO.encoding | regEncoding | sizeEncoding);
        }
    }

    /**
     * Test a single bit and branch if the bit is nonzero.
     *
     * @param cmp general purpose register. May not be null, zero-register or stackpointer.
     * @param uimm6 Unsigned 6-bit bit index.
     * @param label Can only handle 16-bit word-aligned offsets for now. May be unbound. Non null.
     */
    public void tbnz(Register cmp, int uimm6, Label label) {
        assert NumUtil.isUnsignedNbit(6, uimm6);
        if (label.isBound()) {
            int offset = label.position() - position();
            super.tbnz(cmp, uimm6, offset);
        } else {
            label.addPatchAt(position(), this);
            // Placeholder layout: [reg | 6-bit index | kind], decoded again in patchJumpTarget.
            int indexEncoding = uimm6 << PatchLabelKind.INFORMATION_OFFSET;
            int regEncoding = cmp.encoding << (PatchLabelKind.INFORMATION_OFFSET + 6);
            emitInt(PatchLabelKind.BRANCH_BIT_NONZERO.encoding | indexEncoding | regEncoding);
        }
    }

    /**
     * Test a single bit and branch if the bit is zero.
     *
     * @param cmp general purpose register. May not be null, zero-register or stackpointer.
     * @param uimm6 Unsigned 6-bit bit index.
     * @param label Can only handle 16-bit word-aligned offsets for now. May be unbound. Non null.
     */
    public void tbz(Register cmp, int uimm6, Label label) {
        assert NumUtil.isUnsignedNbit(6, uimm6);
        if (label.isBound()) {
            int offset = label.position() - position();
            super.tbz(cmp, uimm6, offset);
        } else {
            label.addPatchAt(position(), this);
            // Placeholder layout: [reg | 6-bit index | kind], decoded again in patchJumpTarget.
            int indexEncoding = uimm6 << PatchLabelKind.INFORMATION_OFFSET;
            int regEncoding = cmp.encoding << (PatchLabelKind.INFORMATION_OFFSET + 6);
            emitInt(PatchLabelKind.BRANCH_BIT_ZERO.encoding | indexEncoding | regEncoding);
        }
    }

    /**
     * Branches to label if condition is true.
     *
     * @param condition any condition value allowed. Non null.
     * @param label Can only handle 21-bit word-aligned offsets for now. May be unbound. Non null.
     */
    public void branchConditionally(ConditionFlag condition, Label label) {
        // TODO Handle case where offset is too large for a single jump instruction
        if (label.isBound()) {
            int offset = label.position() - position();
            super.b(condition, offset);
        } else {
            label.addPatchAt(position(), this);
            // Encode condition flag so that we know how to patch the instruction later
            emitInt(PatchLabelKind.BRANCH_CONDITIONALLY.encoding | condition.encoding << PatchLabelKind.INFORMATION_OFFSET);
        }
    }

    /**
     * Branches if condition is true. Address of jump is patched up by HotSpot c++ code.
     *
     * @param condition any condition value allowed. Non null.
     */
    public void branchConditionally(ConditionFlag condition) {
        // Correct offset is fixed up by HotSpot later.
        super.b(condition, 0);
    }

    /**
     * Jumps to label.
     *
     * @param label Can only handle signed 28-bit offsets. May be unbound. Non null.
     */
    @Override
    public void jmp(Label label) {
        // TODO Handle case where offset is too large for a single jump instruction
        if (label.isBound()) {
            int offset = label.position() - position();
            super.b(offset);
        } else {
            label.addPatchAt(position(), this);
            emitInt(PatchLabelKind.BRANCH_UNCONDITIONALLY.encoding);
        }
    }

    /**
     * Jump to address in dest.
     *
     * @param dest General purpose register. May not be null, zero-register or stackpointer.
     */
    public void jmp(Register dest) {
        super.br(dest);
    }

    /**
     * Immediate jump instruction fixed up by HotSpot c++ code.
     */
    public void jmp() {
        // Offset has to be fixed up by c++ code.
        super.b(0);
    }

    /**
     * @return true if immediate offset can be used in a single branch instruction.
     */
    public static boolean isBranchImmediateOffset(long imm) {
        return NumUtil.isSignedNbit(28, imm);
    }

    /* system instructions */

    /**
     * Exception codes used when calling hlt instruction.
     */
    public enum AArch64ExceptionCode {
        NO_SWITCH_TARGET(0x0),
        BREAKPOINT(0x1);

        public final int encoding;

        AArch64ExceptionCode(int encoding) {
            this.encoding = encoding;
        }
    }

    /**
     * Halting mode software breakpoint: Enters halting mode debug state if enabled, else treated as
     * UNALLOCATED instruction.
     *
     * @param exceptionCode exception code specifying why halt was called. Non null.
     */
    public void hlt(AArch64ExceptionCode exceptionCode) {
        super.hlt(exceptionCode.encoding);
    }

    /**
     * Monitor mode software breakpoint: exception routed to a debug monitor executing in a higher
     * exception level.
     *
     * @param exceptionCode exception code specifying why break was called. Non null.
     */
    public void brk(AArch64ExceptionCode exceptionCode) {
        super.brk(exceptionCode.encoding);
    }

    /**
     * Emits a hint that the current thread is in a spin-wait loop (YIELD).
     */
    public void pause() {
        super.hint(SystemHint.YIELD);
    }

    /**
     * Executes no-op instruction. No registers or flags are updated, except for PC.
     */
    public void nop() {
        super.hint(SystemHint.NOP);
    }

    /**
     * Consumption of Speculative Data Barrier. This is a memory barrier that controls speculative
     * execution and data value prediction.
     */
    public void csdb() {
        super.hint(SystemHint.CSDB);
    }

    /**
     * Same as {@link #nop()}.
     */
    @Override
    public void ensureUniquePC() {
        nop();
    }

    /**
     * Aligns PC.
     *
     * @param modulus Has to be positive multiple of 4.
     */
    @Override
    public void align(int modulus) {
        assert modulus > 0 && (modulus & 0x3) == 0 : "Modulus has to be a positive multiple of 4.";
        if (position() % modulus == 0) {
            return;
        }
        // Pad with 4-byte NOPs until the position is a multiple of modulus.
        int offset = modulus - position() % modulus;
        for (int i = 0; i < offset; i += 4) {
            nop();
        }
    }

    /**
     * Patches jump targets when label gets bound.
     *
     * The placeholder word emitted for an unbound label encodes a {@link PatchLabelKind} in its
     * low bits plus kind-specific information above {@link PatchLabelKind#INFORMATION_OFFSET};
     * this method decodes that word and re-emits the real instruction at the same position.
     */
    @Override
    protected void patchJumpTarget(int branch, int jumpTarget) {
        int instruction = getInt(branch);
        int branchOffset = jumpTarget - branch;
        PatchLabelKind type = PatchLabelKind.fromEncoding(instruction);
        switch (type) {
            case BRANCH_CONDITIONALLY:
                ConditionFlag cf = ConditionFlag.fromEncoding(instruction >>> PatchLabelKind.INFORMATION_OFFSET);
                super.b(cf, branchOffset, branch);
                break;
            case BRANCH_UNCONDITIONALLY:
                super.b(branchOffset, branch);
                break;
            case JUMP_ADDRESS:
                int offset = instruction >>> PatchLabelKind.INFORMATION_OFFSET;
                emitInt(jumpTarget - offset, branch);
                break;
            case BRANCH_NONZERO:
            case BRANCH_ZERO: {
                // Placeholder layout for cbz/cbnz: [reg | size bit | kind].
                int information = instruction >>> PatchLabelKind.INFORMATION_OFFSET;
                int sizeEncoding = information & 1;
                int regEncoding = information >>> 1;
                Register reg = AArch64.cpuRegisters.get(regEncoding);
                // 1 => 64; 0 => 32
                int size = sizeEncoding * 32 + 32;
                if (!NumUtil.isSignedNbit(21, branchOffset)) {
                    throw new BranchTargetOutOfBoundsException(true, "Branch target %d out of bounds", branchOffset);
                }
                switch (type) {
                    case BRANCH_NONZERO:
                        super.cbnz(size, reg, branchOffset, branch);
                        break;
                    case BRANCH_ZERO:
                        super.cbz(size, reg, branchOffset, branch);
                        break;
                }
                break;
            }
            case BRANCH_BIT_NONZERO:
            case BRANCH_BIT_ZERO: {
                // Placeholder layout for tbz/tbnz: [reg | 6-bit index | kind].
                int information = instruction >>> PatchLabelKind.INFORMATION_OFFSET;
                int sizeEncoding = information & NumUtil.getNbitNumberInt(6);
                int regEncoding = information >>> 6;
                Register reg = AArch64.cpuRegisters.get(regEncoding);
                if (!NumUtil.isSignedNbit(16, branchOffset)) {
                    throw new BranchTargetOutOfBoundsException(true, "Branch target %d out of bounds", branchOffset);
                }
                switch (type) {
                    case BRANCH_BIT_NONZERO:
                        super.tbnz(reg, sizeEncoding, branchOffset, branch);
                        break;
                    case BRANCH_BIT_ZERO:
                        super.tbz(reg, sizeEncoding, branchOffset, branch);
                        break;
                }
                break;
            }
            case ADR: {
                int information = instruction >>> PatchLabelKind.INFORMATION_OFFSET;
                int regEncoding = information;
                Register reg = AArch64.cpuRegisters.get(regEncoding);
                super.adr(reg, branchOffset, branch);
                break;
            }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Generates an address of the form {@code base + displacement}.
     *
     * Does not change base register to fulfill this requirement. Will fail if displacement cannot
     * be represented directly as address.
     *
     * @param base general purpose register. May not be null or the zero register.
     * @param displacement arbitrary displacement added to base.
     * @return AArch64Address referencing memory at {@code base + displacement}.
     */
    @Override
    public AArch64Address makeAddress(Register base, int displacement) {
        return makeAddress(base, displacement, zr, /* signExtend */false, /* transferSize */0,
                        zr, /* allowOverwrite */false);
    }

    @Override
    public AArch64Address getPlaceholder(int instructionStartPosition) {
        return AArch64Address.PLACEHOLDER;
    }

    /**
     * Emits a patchable adrp/add pair that materializes an absolute address into dst; the real
     * address is filled in later via the code-patching annotation.
     */
    public void addressOf(Register dst) {
        if (codePatchingAnnotationConsumer != null) {
            codePatchingAnnotationConsumer.accept(new AdrpAddMacroInstruction(position()));
        }
        super.adrp(dst);
        super.add(64, dst, dst, 0);
    }

    /**
     * Loads an address into Register d.
1784 * 1785 * @param d general purpose register. May not be null. 1786 * @param a AArch64Address the address of an operand. 1787 */ 1788 public void lea(Register d, AArch64Address a) { 1789 a.lea(this, d); 1790 } 1791 1792 /** 1793 * Count the set bits of src register. 1794 * 1795 * @param size src register size. Has to be 32 or 64. 1796 * @param dst general purpose register. Should not be null or zero-register. 1797 * @param src general purpose register. Should not be null. 1798 * @param vreg SIMD register. Should not be null. 1799 */ 1800 public void popcnt(int size, Register dst, Register src, Register vreg) { 1801 assert 32 == size || 64 == size : "Invalid data size"; 1802 fmov(size, vreg, src); 1803 final int fixedSize = 64; 1804 cnt(fixedSize, vreg, vreg); 1805 addv(fixedSize, SIMDElementSize.Byte, vreg, vreg); 1806 umov(fixedSize, dst, 0, vreg); 1807 } 1808 1809 public interface MacroInstruction { 1810 void patch(int codePos, int relative, byte[] code); 1811 } 1812 1813 /** 1814 * Emits elf patchable adrp ldr sequence. 
1815 */ 1816 public void adrpLdr(int srcSize, Register result, AArch64Address a) { 1817 if (codePatchingAnnotationConsumer != null) { 1818 codePatchingAnnotationConsumer.accept(new AdrpLdrMacroInstruction(position())); 1819 } 1820 super.adrp(a.getBase()); 1821 this.ldr(srcSize, result, a); 1822 } 1823 1824 public static class AdrpLdrMacroInstruction extends CodeAnnotation implements MacroInstruction { 1825 public AdrpLdrMacroInstruction(int position) { 1826 super(position); 1827 } 1828 1829 @Override 1830 public String toString() { 1831 return "ADRP_LDR"; 1832 } 1833 1834 @Override 1835 public void patch(int codePos, int relative, byte[] code) { 1836 throw GraalError.unimplemented(); 1837 } 1838 } 1839 1840 public static class AdrpAddMacroInstruction extends CodeAnnotation implements MacroInstruction { 1841 public AdrpAddMacroInstruction(int position) { 1842 super(position); 1843 } 1844 1845 @Override 1846 public String toString() { 1847 return "ADRP_ADD"; 1848 } 1849 1850 @Override 1851 public void patch(int codePos, int relative, byte[] code) { 1852 throw GraalError.unimplemented(); 1853 } 1854 } 1855 }