/*
 * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package org.graalvm.compiler.core.aarch64;

import static jdk.vm.ci.aarch64.AArch64.sp;
import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isIntConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;

import java.util.function.Function;

import org.graalvm.compiler.asm.aarch64.AArch64Address.AddressingMode;
import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.common.spi.LIRKindTool;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.StandardOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.aarch64.AArch64AddressValue;
import org.graalvm.compiler.lir.aarch64.AArch64ArithmeticOp;
import org.graalvm.compiler.lir.aarch64.AArch64ArrayCompareToOp;
import org.graalvm.compiler.lir.aarch64.AArch64ArrayEqualsOp;
import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddLSEOp;
import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddOp;
import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndWriteOp;
import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.CompareAndSwapOp;
import org.graalvm.compiler.lir.aarch64.AArch64ByteSwapOp;
import org.graalvm.compiler.lir.aarch64.AArch64Compare;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.CompareBranchZeroOp;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.CondMoveOp;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.CondSetOp;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.TableSwitchOp;
import org.graalvm.compiler.lir.aarch64.AArch64LIRFlagsVersioned;
import org.graalvm.compiler.lir.aarch64.AArch64Move;
import org.graalvm.compiler.lir.aarch64.AArch64Move.MembarOp;
import org.graalvm.compiler.lir.aarch64.AArch64PauseOp;
import org.graalvm.compiler.lir.aarch64.AArch64SpeculativeBarrier;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
import org.graalvm.compiler.lir.gen.LIRGenerator;
import org.graalvm.compiler.phases.util.Providers;

import jdk.vm.ci.aarch64.AArch64;
import jdk.vm.ci.aarch64.AArch64Kind;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.PrimitiveConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;

public abstract class AArch64LIRGenerator extends LIRGenerator {

    public AArch64LIRGenerator(LIRKindTool lirKindTool, AArch64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
    }

    /**
     * Checks whether the supplied constant can be used without loading it into a register for
     * store operations, i.e., on the right hand side of a memory access.
     *
     * @param c The constant to check.
     * @return True if the constant can be used directly, false if the constant needs to be in a
     *         register.
     */
    protected static final boolean canStoreConstant(JavaConstant c) {
        // Our own code never calls this since we can't make a definite statement about whether or
        // not we can inline a constant without knowing what kind of operation we execute. Let's be
        // optimistic here and fix up mistakes later.
        return true;
    }

    /**
     * If val denotes the stack pointer, move it to another location. This is necessary since most
     * ops cannot handle the stack pointer as input or output.
     */
    public AllocatableValue moveSp(AllocatableValue val) {
        if (val instanceof RegisterValue && ((RegisterValue) val).getRegister().equals(sp)) {
            assert val.getPlatformKind() == AArch64Kind.QWORD : "Stack pointer must be long";
            return emitMove(val);
        }
        return val;
    }

    /**
     * AArch64 cannot use anything smaller than a word in any instruction other than load and
     * store.
     */
    @Override
    public <K extends ValueKind<K>> K toRegisterKind(K kind) {
        switch ((AArch64Kind) kind.getPlatformKind()) {
            case BYTE:
            case WORD:
                return kind.changeType(AArch64Kind.DWORD);
            default:
                return kind;
        }
    }

    @Override
    public void emitNullCheck(Value address, LIRFrameState state) {
        append(new AArch64Move.NullCheckOp(asAddressValue(address), state));
    }

    @Override
    public Variable emitAddress(AllocatableValue stackslot) {
        Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new AArch64Move.StackLoadAddressOp(result, stackslot));
        return result;
    }

    public AArch64AddressValue asAddressValue(Value address) {
        if (address instanceof AArch64AddressValue) {
            return (AArch64AddressValue) address;
        } else {
            return new AArch64AddressValue(address.getValueKind(), asAllocatable(address), Value.ILLEGAL, 0, 1, AddressingMode.BASE_REGISTER_ONLY);
        }
    }

    @Override
    public Variable emitLogicCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        Variable prevValue = newVariable(expectedValue.getValueKind());
        Variable scratch = newVariable(LIRKind.value(AArch64Kind.DWORD));
        append(new CompareAndSwapOp(prevValue, loadReg(expectedValue), loadReg(newValue), asAllocatable(address), scratch));
        assert trueValue.getValueKind().equals(falseValue.getValueKind());
        Variable result = newVariable(trueValue.getValueKind());
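        // The CAS leaves the condition flags set: EQ iff the value loaded from memory matched
        // expectedValue, so the conditional move below selects trueValue exactly on success. A
        // rough sketch of the emitted sequence (assuming no LSE; the exact code is defined by
        // CompareAndSwapOp):
        //   retry: ldaxr prev, [addr]
        //          cmp   prev, expected
        //          b.ne  done
        //          stlxr scratch, new, [addr]
        //          cbnz  scratch, retry
        //   done: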
        append(new CondMoveOp(result, ConditionFlag.EQ, asAllocatable(trueValue), asAllocatable(falseValue)));
        return result;
    }

    @Override
    public Variable emitValueCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue) {
        Variable result = newVariable(newValue.getValueKind());
        Variable scratch = newVariable(LIRKind.value(AArch64Kind.WORD));
        append(new CompareAndSwapOp(result, loadNonCompareConst(expectedValue), loadReg(newValue), asAllocatable(address), scratch));
        return result;
    }

    @Override
    public Value emitAtomicReadAndWrite(Value address, ValueKind<?> kind, Value newValue) {
        Variable result = newVariable(kind);
        Variable scratch = newVariable(kind);
        append(new AtomicReadAndWriteOp((AArch64Kind) kind.getPlatformKind(), asAllocatable(result), asAllocatable(address), asAllocatable(newValue), asAllocatable(scratch)));
        return result;
    }

    @Override
    public Value emitAtomicReadAndAdd(Value address, ValueKind<?> kind, Value delta) {
        Variable result = newVariable(kind);
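        // On CPUs with the LSE extension (ARMv8.1), the update can be performed with a single
        // atomic read-modify-write instruction (an ldadd variant); otherwise we fall back to a
        // load/store-exclusive retry loop.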
        if (AArch64LIRFlagsVersioned.useLSE(target().arch)) {
            append(new AtomicReadAndAddLSEOp((AArch64Kind) kind.getPlatformKind(), asAllocatable(result), asAllocatable(address), asAllocatable(delta)));
        } else {
            append(new AtomicReadAndAddOp((AArch64Kind) kind.getPlatformKind(), asAllocatable(result), asAllocatable(address), delta));
        }
        return result;
    }

    @Override
    public void emitMembar(int barriers) {
        int necessaryBarriers = target().arch.requiredBarriers(barriers);
        if (target().isMP && necessaryBarriers != 0) {
            append(new MembarOp(necessaryBarriers));
        }
    }

    @Override
    public void emitJump(LabelRef label) {
        assert label != null;
        append(new StandardOp.JumpOp(label));
    }

    @Override
    public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpKind, double overflowProbability) {
        append(new AArch64ControlFlow.BranchOp(ConditionFlag.VS, overflow, noOverflow, overflowProbability));
    }

    /**
     * Branches to label if (left & right) == 0. If negated is true, branches on non-zero instead.
     *
     * @param left Integer kind. Non null.
     * @param right Integer kind. Non null.
     * @param trueDestination destination if left & right == 0. Non null.
     * @param falseDestination destination if left & right != 0. Non null.
     * @param trueSuccessorProbability historic probability that the comparison is true.
     */
    @Override
    public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueSuccessorProbability) {
        assert ((AArch64Kind) left.getPlatformKind()).isInteger() && left.getPlatformKind() == right.getPlatformKind();
        ((AArch64ArithmeticLIRGenerator) getArithmetic()).emitBinary(LIRKind.combine(left, right), AArch64ArithmeticOp.ANDS, true, left, right);
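        // ANDS discards its result here but sets the flags: Z, and therefore EQ, holds exactly
        // when (left & right) == 0.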
        append(new AArch64ControlFlow.BranchOp(ConditionFlag.EQ, trueDestination, falseDestination, trueSuccessorProbability));
    }

    /**
     * Conditionally move trueValue into a new variable if cond + unorderedIsTrue is true, else
     * falseValue.
     *
     * @param left Arbitrary value. Has to have same type as right. Non null.
     * @param right Arbitrary value. Has to have same type as left. Non null.
     * @param cond condition that decides whether to move trueValue or falseValue into result. Non
     *            null.
     * @param unorderedIsTrue defines whether floating-point comparisons consider unordered true or
     *            not. Ignored for integer comparisons.
     * @param trueValue arbitrary value same type as falseValue. Non null.
     * @param falseValue arbitrary value same type as trueValue. Non null.
     * @return value containing trueValue if cond + unorderedIsTrue is true, else falseValue. Non
     *         null.
     */
    @Override
    public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
        boolean mirrored = emitCompare(cmpKind, left, right, cond, unorderedIsTrue);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        boolean finalUnorderedIsTrue = mirrored ? !unorderedIsTrue : unorderedIsTrue;
        ConditionFlag cmpCondition = toConditionFlag(((AArch64Kind) cmpKind).isInteger(), finalCondition, finalUnorderedIsTrue);
        Variable result = newVariable(trueValue.getValueKind());
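        // If the two values are the constants 1 and 0, a single conditional set (cset) suffices;
        // otherwise both values are materialized and selected with a conditional move (csel).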
        if (isIntConstant(trueValue, 1) && isIntConstant(falseValue, 0)) {
            append(new CondSetOp(result, cmpCondition));
        } else if (isIntConstant(trueValue, 0) && isIntConstant(falseValue, 1)) {
            append(new CondSetOp(result, cmpCondition.negate()));
        } else {
            append(new CondMoveOp(result, cmpCondition, loadReg(trueValue), loadReg(falseValue)));
        }
        return result;
    }

    @Override
    public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueDestination, LabelRef falseDestination,
                    double trueDestinationProbability) {
        if (cond == Condition.EQ) {
            // Emit a cbz instruction for IsNullNode.
            assert !LIRValueUtil.isNullConstant(left) : "emitNullCheckBranch()'s null input should be in right.";
            if (LIRValueUtil.isNullConstant(right)) {
                append(new CompareBranchZeroOp(asAllocatable(left), trueDestination, falseDestination, trueDestinationProbability));
                return;
            }

            // Emit a cbz instruction for IntegerEquals when any of the inputs is zero.
            AArch64Kind kind = (AArch64Kind) cmpKind;
            if (kind.isInteger()) {
                if (isIntConstant(left, 0)) {
                    append(new CompareBranchZeroOp(asAllocatable(right), trueDestination, falseDestination, trueDestinationProbability));
                    return;
                } else if (isIntConstant(right, 0)) {
                    append(new CompareBranchZeroOp(asAllocatable(left), trueDestination, falseDestination, trueDestinationProbability));
                    return;
                }
            }
        }

        boolean mirrored = emitCompare(cmpKind, left, right, cond, unorderedIsTrue);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        boolean finalUnorderedIsTrue = mirrored ? !unorderedIsTrue : unorderedIsTrue;
        ConditionFlag cmpCondition = toConditionFlag(((AArch64Kind) cmpKind).isInteger(), finalCondition, finalUnorderedIsTrue);
        append(new BranchOp(cmpCondition, trueDestination, falseDestination, trueDestinationProbability));
    }

    private static ConditionFlag toConditionFlag(boolean isInt, Condition cond, boolean unorderedIsTrue) {
        return isInt ? toIntConditionFlag(cond) : toFloatConditionFlag(cond, unorderedIsTrue);
    }

    /**
     * Takes a Condition and an unorderedIsTrue flag and returns the correct AArch64-specific
     * ConditionFlag. Note: This is only correct if the emitCompare code for floats has correctly
     * handled the case of 'EQ && unorderedIsTrue', respectively 'NE && !unorderedIsTrue'!
     */
    private static ConditionFlag toFloatConditionFlag(Condition cond, boolean unorderedIsTrue) {
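        // FCMP sets NZCV to 0011 (N=0, Z=0, C=1, V=1) for unordered inputs. The flag pairs below
        // are chosen accordingly: e.g. LT (N != V) is true for unordered inputs, while LO (C == 0)
        // is false, which is what distinguishes the two unorderedIsTrue cases.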
        switch (cond) {
            case LT:
                return unorderedIsTrue ? ConditionFlag.LT : ConditionFlag.LO;
            case LE:
                return unorderedIsTrue ? ConditionFlag.LE : ConditionFlag.LS;
            case GE:
                return unorderedIsTrue ? ConditionFlag.PL : ConditionFlag.GE;
            case GT:
                return unorderedIsTrue ? ConditionFlag.HI : ConditionFlag.GT;
            case EQ:
                return ConditionFlag.EQ;
            case NE:
                return ConditionFlag.NE;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Takes a Condition and returns the correct AArch64-specific ConditionFlag.
     */
    private static ConditionFlag toIntConditionFlag(Condition cond) {
        switch (cond) {
            case EQ:
                return ConditionFlag.EQ;
            case NE:
                return ConditionFlag.NE;
            case LT:
                return ConditionFlag.LT;
            case LE:
                return ConditionFlag.LE;
            case GT:
                return ConditionFlag.GT;
            case GE:
                return ConditionFlag.GE;
            case AE:
                return ConditionFlag.HS;
            case BE:
                return ConditionFlag.LS;
            case AT:
                return ConditionFlag.HI;
            case BT:
                return ConditionFlag.LO;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * This method emits the compare instruction, and may reorder the operands. It returns true if
     * it did so.
     *
     * @param a the left operand of the comparison. Has to have same type as b. Non null.
     * @param b the right operand of the comparison. Has to have same type as a. Non null.
     * @return true if mirrored (i.e. "b cmp a" instead of "a cmp b" was done).
     */
    protected boolean emitCompare(PlatformKind cmpKind, Value a, Value b, Condition condition, boolean unorderedIsTrue) {
        Value left;
        Value right;
        boolean mirrored;
        AArch64Kind kind = (AArch64Kind) cmpKind;
        if (kind.isInteger()) {
            Value aExt = a;
            Value bExt = b;

            int compareBytes = cmpKind.getSizeInBytes();
            // AArch64 compares 32 or 64 bits: sign extend a and b as required.
            if (compareBytes < a.getPlatformKind().getSizeInBytes()) {
                aExt = arithmeticLIRGen.emitSignExtend(a, compareBytes * 8, 64);
            }
            if (compareBytes < b.getPlatformKind().getSizeInBytes()) {
                bExt = arithmeticLIRGen.emitSignExtend(b, compareBytes * 8, 64);
            }

            if (LIRValueUtil.isVariable(bExt)) {
                left = load(bExt);
                right = loadNonConst(aExt);
                mirrored = true;
            } else {
                left = load(aExt);
                right = loadNonConst(bExt);
                mirrored = false;
            }
            append(new AArch64Compare.CompareOp(left, loadNonCompareConst(right)));
        } else if (kind.isSIMD()) {
            if (AArch64Compare.FloatCompareOp.isFloatCmpConstant(a, condition, unorderedIsTrue)) {
                left = load(b);
                right = a;
                mirrored = true;
            } else if (AArch64Compare.FloatCompareOp.isFloatCmpConstant(b, condition, unorderedIsTrue)) {
                left = load(a);
                right = b;
                mirrored = false;
            } else {
                left = load(a);
                right = loadReg(b);
                mirrored = false;
            }
            append(new AArch64Compare.FloatCompareOp(left, asAllocatable(right), condition, unorderedIsTrue));
        } else {
            throw GraalError.shouldNotReachHere();
        }
        return mirrored;
    }

    /**
     * If value is a constant that cannot be used directly with a gpCompare instruction, load it
     * into a register and return the register; otherwise return the constant value unchanged.
     */
    protected Value loadNonCompareConst(Value value) {
        if (!isCompareConstant(value)) {
            return loadReg(value);
        }
        return value;
    }

    /**
     * Checks whether value can be used directly with a gpCompare instruction. This is <b>not</b>
     * the same as {@link AArch64ArithmeticLIRGenerator#isArithmeticConstant(JavaConstant)},
     * because 0.0 is a valid compare constant for floats, while there are no arithmetic constants
     * for floats.
     *
     * @param value any type. Non null.
     * @return true if value can be used directly in comparison instruction, false otherwise.
     */
    public boolean isCompareConstant(Value value) {
        if (isJavaConstant(value)) {
            JavaConstant constant = asJavaConstant(value);
            if (constant instanceof PrimitiveConstant) {
                final long longValue = constant.asLong();
                long maskedValue;
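                // Mask the constant to the unsigned width of its kind before checking whether it
                // fits the immediate encoding of a compare (roughly, a 12-bit unsigned immediate,
                // optionally shifted left by 12).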
                switch (constant.getJavaKind()) {
                    case Boolean:
                    case Byte:
                        maskedValue = longValue & 0xFF;
                        break;
                    case Char:
                    case Short:
                        maskedValue = longValue & 0xFFFF;
                        break;
                    case Int:
                        maskedValue = longValue & 0xFFFF_FFFFL;
                        break;
                    case Long:
                        maskedValue = longValue;
                        break;
                    default:
                        throw GraalError.shouldNotReachHere();
                }
                return AArch64MacroAssembler.isArithmeticImmediate(maskedValue);
            } else {
                return constant.isDefaultForKind();
            }
        }
        return false;
    }

    /**
     * Moves trueValue into result if (left & right) == 0, else falseValue.
     *
     * @param left Integer kind. Non null.
     * @param right Integer kind. Non null.
     * @param trueValue Integer kind. Non null.
     * @param falseValue Integer kind. Non null.
     * @return virtual register containing trueValue if (left & right) == 0, else falseValue.
     */
    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        assert ((AArch64Kind) left.getPlatformKind()).isInteger() && ((AArch64Kind) right.getPlatformKind()).isInteger();
        assert ((AArch64Kind) trueValue.getPlatformKind()).isInteger() && ((AArch64Kind) falseValue.getPlatformKind()).isInteger();
        ((AArch64ArithmeticLIRGenerator) getArithmetic()).emitBinary(left.getValueKind(), AArch64ArithmeticOp.ANDS, true, left, right);
        Variable result = newVariable(trueValue.getValueKind());

        if (isIntConstant(trueValue, 1) && isIntConstant(falseValue, 0)) {
            append(new CondSetOp(result, ConditionFlag.EQ));
        } else if (isIntConstant(trueValue, 0) && isIntConstant(falseValue, 1)) {
            append(new CondSetOp(result, ConditionFlag.NE));
        } else {
            append(new CondMoveOp(result, ConditionFlag.EQ, load(trueValue), load(falseValue)));
        }
        return result;
    }

    @Override
    public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
        append(createStrategySwitchOp(strategy, keyTargets, defaultTarget, key, newVariable(key.getValueKind()), AArch64LIRGenerator::toIntConditionFlag));
    }

    protected StrategySwitchOp createStrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Variable key, AllocatableValue scratchValue,
                    Function<Condition, ConditionFlag> converter) {
        return new StrategySwitchOp(strategy, keyTargets, defaultTarget, key, scratchValue, converter);
    }

    @Override
    protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
        append(new TableSwitchOp(lowKey, defaultTarget, targets, key, newVariable(LIRKind.value(target().arch.getWordKind())), newVariable(key.getValueKind())));
    }

    @Override
    public Variable emitByteSwap(Value input) {
        Variable result = newVariable(LIRKind.combine(input));
        append(new AArch64ByteSwapOp(result, input));
        return result;
    }

    @Override
    public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
        LIRKind resultKind = LIRKind.value(AArch64Kind.DWORD);
        // DMS TODO: check calling convention and registers used
        RegisterValue res = AArch64.r0.asValue(resultKind);
        RegisterValue cnt1 = AArch64.r1.asValue(length1.getValueKind());
        RegisterValue cnt2 = AArch64.r2.asValue(length2.getValueKind());
        emitMove(cnt1, length1);
        emitMove(cnt2, length2);
        append(new AArch64ArrayCompareToOp(this, kind1, kind2, res, array1, array2, cnt1, cnt2));
        Variable result = newVariable(resultKind);
        emitMove(result, res);
        return result;
    }

    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length, int constantLength, boolean directPointers) {
        Variable result = newVariable(LIRKind.value(AArch64Kind.DWORD));
        append(new AArch64ArrayEqualsOp(this, kind, result, array1, array2, asAllocatable(length), directPointers));
        return result;
    }

    @Override
    protected JavaConstant zapValueForKind(PlatformKind kind) {
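        // An easily recognizable bit pattern, truncated below to the width of the requested kind,
        // so that accidental uses of zapped (dead) values stand out in crash dumps.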
        long dead = 0xDEADDEADDEADDEADL;
        switch ((AArch64Kind) kind) {
            case BYTE:
                return JavaConstant.forByte((byte) dead);
            case WORD:
                return JavaConstant.forShort((short) dead);
            case DWORD:
                return JavaConstant.forInt((int) dead);
            case QWORD:
                return JavaConstant.forLong(dead);
            case SINGLE:
                return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
            case DOUBLE:
                return JavaConstant.forDouble(Double.longBitsToDouble(dead));
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Loads value into virtual register. Contrary to {@link #load(Value)} this handles
     * RegisterValues (i.e. values corresponding to fixed physical registers) correctly, by not
     * creating an unnecessary move into a virtual register.
     *
     * This avoids generating the following code:
     *
     * <pre>
     * mov x0, x19   // x19 is the fixed thread register
     * ldr x0, [x0]
     * </pre>
     *
     * instead of:
     *
     * <pre>
     * ldr x0, [x19]
     * </pre>
     */
    protected AllocatableValue loadReg(Value val) {
        if (!(val instanceof Variable || val instanceof RegisterValue)) {
            return emitMove(val);
        }
        return (AllocatableValue) val;
    }

    @Override
    public void emitPause() {
        append(new AArch64PauseOp());
    }

    public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args);

    @Override
    public void emitSpeculationFence() {
        append(new AArch64SpeculativeBarrier());
    }
}