/*
 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */


package org.graalvm.compiler.core.amd64;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.lir.LIRValueUtil.asConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.isIntConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;

import java.util.Optional;

import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
import org.graalvm.compiler.core.common.spi.LIRKindTool;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.ConstantValue;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.StandardOp.JumpOp;
import org.graalvm.compiler.lir.StandardOp.SaveRegistersOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
import org.graalvm.compiler.lir.amd64.AMD64ArrayCompareToOp;
import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
import org.graalvm.compiler.lir.amd64.AMD64ArrayIndexOfOp;
import org.graalvm.compiler.lir.amd64.AMD64Binary;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Call;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondSetOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondSetOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.HashTableSwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64LFenceOp;
import org.graalvm.compiler.lir.amd64.AMD64Move;
import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
import org.graalvm.compiler.lir.amd64.AMD64StringLatin1InflateOp;
import org.graalvm.compiler.lir.amd64.AMD64StringUTF16CompressOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
import org.graalvm.compiler.lir.gen.LIRGenerator;
import org.graalvm.compiler.lir.hashing.Hasher;
import org.graalvm.compiler.phases.util.Providers;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.VMConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;

/**
 * This class implements the AMD64 specific portion of the LIR generator.
 */
public abstract class AMD64LIRGenerator extends LIRGenerator {

    public AMD64LIRGenerator(LIRKindTool lirKindTool, AMD64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
    }
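
    // Note on the check below: x86-64 stores accept at most a sign-extended 32-bit
    // immediate (there is no MOV m64, imm64 encoding, and SSE stores have no immediate
    // form), so wider or floating-point constants must be materialized in a register.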
    /**
     * Checks whether the supplied constant can be used without loading it into a register for
     * store operations, i.e., on the right hand side of a memory access.
     *
     * @param c The constant to check.
     * @return True if the constant can be used directly, false if the constant needs to be in a
     *         register.
     */
    protected static final boolean canStoreConstant(JavaConstant c) {
        // there is no immediate move of 64-bit constants on Intel
        switch (c.getJavaKind()) {
            case Long:
                return NumUtil.isInt(c.asLong());
            case Double:
                return false;
            case Object:
                return c.isNull();
            default:
                return true;
        }
    }

    @Override
    protected JavaConstant zapValueForKind(PlatformKind kind) {
        long dead = 0xDEADDEADDEADDEADL;
        switch ((AMD64Kind) kind) {
            case BYTE:
                return JavaConstant.forByte((byte) dead);
            case WORD:
                return JavaConstant.forShort((short) dead);
            case DWORD:
                return JavaConstant.forInt((int) dead);
            case QWORD:
                return JavaConstant.forLong(dead);
            case SINGLE:
                return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
            default:
                // we don't support vector types, so just zap with double for all of them
                return JavaConstant.forDouble(Double.longBitsToDouble(dead));
        }
    }
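
    // A constant address that fits in a signed 32 bits can be encoded directly as a
    // base-less [disp32] operand; larger constants must first be loaded into a register
    // and used as the base.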
    public AMD64AddressValue asAddressValue(Value address) {
        if (address instanceof AMD64AddressValue) {
            return (AMD64AddressValue) address;
        } else {
            if (address instanceof JavaConstant) {
                long displacement = ((JavaConstant) address).asLong();
                if (NumUtil.isInt(displacement)) {
                    return new AMD64AddressValue(address.getValueKind(), Value.ILLEGAL, (int) displacement);
                }
            }
            return new AMD64AddressValue(address.getValueKind(), asAllocatable(address), 0);
        }
    }

    @Override
    public Variable emitAddress(AllocatableValue stackslot) {
        Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new StackLeaOp(result, stackslot));
        return result;
    }

    /**
     * The AMD64 backend only uses DWORD and QWORD values in registers because of a performance
     * penalty when accessing WORD or BYTE registers. This function converts small integer kinds to
     * DWORD.
     */
    @Override
    public <K extends ValueKind<K>> K toRegisterKind(K kind) {
        switch ((AMD64Kind) kind.getPlatformKind()) {
            case BYTE:
            case WORD:
                return kind.changeType(AMD64Kind.DWORD);
            default:
                return kind;
        }
    }

    private AllocatableValue asAllocatable(Value value, ValueKind<?> kind) {
        if (value.getValueKind().equals(kind)) {
            return asAllocatable(value);
        } else if (isRegister(value)) {
            return asRegister(value).asValue(kind);
        } else if (isConstantValue(value)) {
            return emitLoadConstant(kind, asConstant(value));
        } else {
            Variable variable = newVariable(kind);
            emitMove(variable, value);
            return variable;
        }
    }
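
    // The compare-and-swap lowering below relies on the fixed-register semantics of
    // CMPXCHG: the instruction compares RAX against the memory operand, stores the new
    // value on a match, loads the old memory value into RAX otherwise, and sets ZF iff
    // the swap happened. The expected value is therefore moved into rax first, and the
    // logic variant can conditionally move on Condition.EQ without a separate compare.
    // XMM values are bit-cast to integer kinds, since CMPXCHG only operates on
    // general-purpose registers.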
    private Value emitCompareAndSwap(boolean isLogic, LIRKind accessKind, Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        ValueKind<?> kind = newValue.getValueKind();
        assert kind.equals(expectedValue.getValueKind());

        AMD64AddressValue addressValue = asAddressValue(address);
        LIRKind integralAccessKind = accessKind;
        Value reinterpretedExpectedValue = expectedValue;
        Value reinterpretedNewValue = newValue;
        boolean isXmm = ((AMD64Kind) accessKind.getPlatformKind()).isXMM();
        if (isXmm) {
            if (accessKind.getPlatformKind().equals(AMD64Kind.SINGLE)) {
                integralAccessKind = LIRKind.fromJavaKind(target().arch, JavaKind.Int);
            } else {
                integralAccessKind = LIRKind.fromJavaKind(target().arch, JavaKind.Long);
            }
            reinterpretedExpectedValue = arithmeticLIRGen.emitReinterpret(integralAccessKind, expectedValue);
            reinterpretedNewValue = arithmeticLIRGen.emitReinterpret(integralAccessKind, newValue);
        }
        AMD64Kind memKind = (AMD64Kind) integralAccessKind.getPlatformKind();
        RegisterValue aRes = AMD64.rax.asValue(integralAccessKind);
        AllocatableValue allocatableNewValue = asAllocatable(reinterpretedNewValue, integralAccessKind);
        emitMove(aRes, reinterpretedExpectedValue);
        append(new CompareAndSwapOp(memKind, aRes, addressValue, aRes, allocatableNewValue));

        if (isLogic) {
            assert trueValue.getValueKind().equals(falseValue.getValueKind());
            Variable result = newVariable(trueValue.getValueKind());
            append(new CondMoveOp(result, Condition.EQ, asAllocatable(trueValue), falseValue));
            return result;
        } else {
            if (isXmm) {
                return arithmeticLIRGen.emitReinterpret(accessKind, aRes);
            } else {
                Variable result = newVariable(kind);
                emitMove(result, aRes);
                return result;
            }
        }
    }

    @Override
    public Variable emitLogicCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        return (Variable) emitCompareAndSwap(true, accessKind, address, expectedValue, newValue, trueValue, falseValue);
    }

    @Override
    public Value emitValueCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue) {
        return emitCompareAndSwap(false, accessKind, address, expectedValue, newValue, null, null);
    }

    public void emitCompareAndSwapBranch(ValueKind<?> kind, AMD64AddressValue address, Value expectedValue, Value newValue, Condition condition, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        assert kind.getPlatformKind().getSizeInBytes() <= expectedValue.getValueKind().getPlatformKind().getSizeInBytes();
        assert kind.getPlatformKind().getSizeInBytes() <= newValue.getValueKind().getPlatformKind().getSizeInBytes();
        assert condition == Condition.EQ || condition == Condition.NE;
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();
        RegisterValue raxValue = AMD64.rax.asValue(kind);
        emitMove(raxValue, expectedValue);
        append(new CompareAndSwapOp(memKind, raxValue, address, raxValue, asAllocatable(newValue)));
        append(new BranchOp(condition, trueLabel, falseLabel, trueLabelProbability));
    }

    @Override
    public Value emitAtomicReadAndAdd(Value address, ValueKind<?> kind, Value delta) {
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndAddOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(delta)));
        return result;
    }

    @Override
    public Value emitAtomicReadAndWrite(Value address, ValueKind<?> kind, Value newValue) {
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndWriteOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(newValue)));
        return result;
    }

    @Override
    public void emitNullCheck(Value address, LIRFrameState state) {
        append(new AMD64Move.NullCheckOp(asAddressValue(address), state));
    }

    @Override
    public void emitJump(LabelRef label) {
        assert label != null;
        append(new JumpOp(label));
    }
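
    // Floating-point compares are emitted as UCOMISS/UCOMISD, which signal an unordered
    // result (a NaN operand) through the parity flag; FloatBranchOp emits the extra
    // parity jump that the integer BranchOp does not need.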
    @Override
    public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
        Condition finalCondition = emitCompare(cmpKind, left, right, cond);
        if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        if (cmpKind.isXMM()) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    @Override
    public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpLIRKind, double overflowProbability) {
        append(new BranchOp(ConditionFlag.Overflow, overflow, noOverflow, overflowProbability));
    }

    @Override
    public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
        emitIntegerTest(left, right);
        append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
    }
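
    // For float comparisons, the lowering below first tries to mirror or negate the
    // condition so that the unordered (NaN) outcome already lands on the desired side,
    // avoiding an explicit parity check; only when that is impossible do the float
    // variants of the set/move ops handle the unordered case themselves.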
    @Override
    public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
        boolean isFloatComparison = cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE;

        Condition finalCondition = cond;
        Value finalTrueValue = trueValue;
        Value finalFalseValue = falseValue;
        if (isFloatComparison) {
            // eliminate the parity check in case of a float comparison
            Value finalLeft = left;
            Value finalRight = right;
            if (unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition)) {
                if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.mirror())) {
                    finalCondition = finalCondition.mirror();
                    finalLeft = right;
                    finalRight = left;
                } else if (finalCondition != Condition.EQ && finalCondition != Condition.NE) {
                    // negating EQ and NE does not make any sense as we would need to negate
                    // unorderedIsTrue as well (otherwise, we would no longer fulfill the Java
                    // NaN semantics)
                    assert unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate());
                    finalCondition = finalCondition.negate();
                    finalTrueValue = falseValue;
                    finalFalseValue = trueValue;
                }
            }
            emitRawCompare(cmpKind, finalLeft, finalRight);
        } else {
            finalCondition = emitCompare(cmpKind, left, right, cond);
        }

        boolean isParityCheckNecessary = isFloatComparison && unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition);
        Variable result = newVariable(finalTrueValue.getValueKind());
        if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 1) && isIntConstant(finalFalseValue, 0)) {
            if (isFloatComparison) {
                append(new FloatCondSetOp(result, finalCondition));
            } else {
                append(new CondSetOp(result, finalCondition));
            }
        } else if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 0) && isIntConstant(finalFalseValue, 1)) {
            if (isFloatComparison) {
                if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate())) {
                    append(new FloatCondSetOp(result, finalCondition.negate()));
                } else {
                    append(new FloatCondSetOp(result, finalCondition));
                    Variable negatedResult = newVariable(result.getValueKind());
                    append(new AMD64Binary.ConstOp(AMD64BinaryArithmetic.XOR, OperandSize.get(result.getPlatformKind()), negatedResult, result, 1));
                    result = negatedResult;
                }
            } else {
                append(new CondSetOp(result, finalCondition.negate()));
            }
        } else if (isFloatComparison) {
            append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(finalTrueValue), load(finalFalseValue)));
        } else {
            append(new CondMoveOp(result, finalCondition, load(finalTrueValue), loadNonConst(finalFalseValue)));
        }
        return result;
    }

    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        emitIntegerTest(left, right);
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
        return result;
    }
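
    // TEST performs a bitwise AND, sets the flags and discards the result, so an
    // "(a & b) == 0" check needs no scratch register; a constant operand can only be
    // encoded directly if it fits in a sign-extended 32-bit immediate.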
    private void emitIntegerTest(Value a, Value b) {
        assert ((AMD64Kind) a.getPlatformKind()).isInteger();
        OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
        if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
        } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
        } else if (isAllocatableValue(b)) {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
        } else {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
        }
    }

    /**
     * This method emits the compare against memory instruction, and may reorder the operands. It
     * returns true if it did so.
     *
     * @param a the left operand of the comparison
     * @param b the right operand of the comparison
     * @return true if the left and right operands were switched, false otherwise
     */
    private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
        OperandSize size;
        switch (cmpKind) {
            case BYTE:
                size = OperandSize.BYTE;
                break;
            case WORD:
                size = OperandSize.WORD;
                break;
            case DWORD:
                size = OperandSize.DWORD;
                break;
            case QWORD:
                size = OperandSize.QWORD;
                break;
            case SINGLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PS, asAllocatable(a), b, state));
                return false;
            case DOUBLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PD, asAllocatable(a), b, state));
                return false;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
        }

        if (isConstantValue(a)) {
            return emitCompareMemoryConOp(size, asConstantValue(a), b, state);
        } else {
            return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
        }
    }

    protected boolean emitCompareMemoryConOp(OperandSize size, ConstantValue a, AMD64AddressValue b, LIRFrameState state) {
        if (JavaConstant.isNull(a.getConstant())) {
            append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, 0, state));
            return true;
        } else if (a.getConstant() instanceof VMConstant && size == DWORD) {
            VMConstant vc = (VMConstant) a.getConstant();
            append(new AMD64BinaryConsumer.MemoryVMConstOp(CMP.getMIOpcode(size, false), b, vc, state));
            return true;
        } else {
            long value = a.getJavaConstant().asLong();
            if (NumUtil.is32bit(value)) {
                append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
                return true;
            } else {
                return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
            }
        }
    }

    private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
        AMD64RMOp op = CMP.getRMOpcode(size);
        append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
        return false;
    }

    /**
     * This method emits the compare instruction, and may reorder the operands.
     *
     * @param a the left operand of the comparison
     * @param b the right operand of the comparison
     * @param cond the condition of the comparison
     * @return the condition to be used by the consumer of the flags, mirrored if the left and
     *         right operands were switched
     */
    private Condition emitCompare(PlatformKind cmpKind, Value a, Value b, Condition cond) {
        if (LIRValueUtil.isVariable(b)) {
            emitRawCompare(cmpKind, b, a);
            return cond.mirror();
        } else {
            emitRawCompare(cmpKind, a, b);
            return cond;
        }
    }

    private void emitRawCompare(PlatformKind cmpKind, Value left, Value right) {
        ((AMD64ArithmeticLIRGeneratorTool) arithmeticLIRGen).emitCompareOp((AMD64Kind) cmpKind, load(left), loadNonConst(right));
    }

    @Override
    public void emitMembar(int barriers) {
        int necessaryBarriers = target().arch.requiredBarriers(barriers);
        if (target().isMP && necessaryBarriers != 0) {
            append(new MembarOp(necessaryBarriers));
        }
    }

    public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args, int numberOfFloatingPointArguments);

    @Override
    protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
        long maxOffset = linkage.getMaxCallTargetOffset();
        if (maxOffset != (int) maxOffset && !GeneratePIC.getValue(getResult().getLIR().getOptions())) {
            append(new AMD64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
        } else {
            append(new AMD64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
        }
    }

    @Override
    public Variable emitByteSwap(Value input) {
        Variable result = newVariable(LIRKind.combine(input));
        append(new AMD64ByteSwapOp(result, input));
        return result;
    }

    @Override
    public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
        LIRKind resultKind = LIRKind.value(AMD64Kind.DWORD);
        RegisterValue raxRes = AMD64.rax.asValue(resultKind);
        RegisterValue cnt1 = AMD64.rcx.asValue(length1.getValueKind());
        RegisterValue cnt2 = AMD64.rdx.asValue(length2.getValueKind());
        emitMove(cnt1, length1);
        emitMove(cnt2, length2);
        append(new AMD64ArrayCompareToOp(this, kind1, kind2, raxRes, array1, array2, cnt1, cnt2));
        Variable result = newVariable(resultKind);
        emitMove(result, raxRes);
        return result;
    }
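
    // The array equality and indexOf intrinsics below compare the data in vector-sized
    // chunks when SSE/AVX registers are available; getMaxVectorSize (further down) lets
    // subclasses cap the vector width used.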
    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length, int constantLength, boolean directPointers) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind, kind, result, array1, array2, asAllocatable(length), constantLength, directPointers, getMaxVectorSize()));
        return result;
    }

    @Override
    public Variable emitArrayEquals(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length, int constantLength, boolean directPointers) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind1, kind2, result, array1, array2, asAllocatable(length), constantLength, directPointers, getMaxVectorSize()));
        return result;
    }

    /**
     * Return a conservative estimate of the page size for use by the String.indexOf intrinsic.
     */
    protected int getVMPageSize() {
        return 4096;
    }

    /**
     * Return the maximum size of vector registers used in SSE/AVX instructions.
     */
    protected int getMaxVectorSize() {
        // default for "unlimited"
        return -1;
    }

    @Override
    public Variable emitArrayIndexOf(JavaKind arrayKind, JavaKind valueKind, boolean findTwoConsecutive, Value arrayPointer, Value arrayLength, Value fromIndex, Value... searchValues) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayIndexOfOp(arrayKind, valueKind, findTwoConsecutive, getMaxVectorSize(), this, result,
                        asAllocatable(arrayPointer), asAllocatable(arrayLength), asAllocatable(fromIndex), searchValues));
        return result;
    }

    @Override
    public void emitStringLatin1Inflate(Value src, Value dst, Value len) {
        RegisterValue rsrc = AMD64.rsi.asValue(src.getValueKind());
        RegisterValue rdst = AMD64.rdi.asValue(dst.getValueKind());
        RegisterValue rlen = AMD64.rdx.asValue(len.getValueKind());

        emitMove(rsrc, src);
        emitMove(rdst, dst);
        emitMove(rlen, len);

        append(new AMD64StringLatin1InflateOp(this, rsrc, rdst, rlen));
    }

    @Override
    public Variable emitStringUTF16Compress(Value src, Value dst, Value len) {
        RegisterValue rsrc = AMD64.rsi.asValue(src.getValueKind());
        RegisterValue rdst = AMD64.rdi.asValue(dst.getValueKind());
        RegisterValue rlen = AMD64.rdx.asValue(len.getValueKind());

        emitMove(rsrc, src);
        emitMove(rdst, dst);
        emitMove(rlen, len);

        LIRKind reskind = LIRKind.value(AMD64Kind.DWORD);
        RegisterValue rres = AMD64.rax.asValue(reskind);

        append(new AMD64StringUTF16CompressOp(this, rres, rsrc, rdst, rlen));

        Variable res = newVariable(reskind);
        emitMove(res, rres);
        return res;
    }

    @Override
    public void emitReturn(JavaKind kind, Value input) {
        AllocatableValue operand = Value.ILLEGAL;
        if (input != null) {
            operand = resultOperandFor(kind, input.getValueKind());
            emitMove(operand, input);
        }
        append(new ReturnOp(operand));
    }
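
    // A SwitchStrategy describes how a multi-way switch is lowered, e.g. as a cascade of
    // compares or a binary search over the keys; dense key sets are instead handled by
    // emitTableSwitch, and hashable ones by emitHashTableSwitch below.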
    protected StrategySwitchOp createStrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Variable key, AllocatableValue temp) {
        return new StrategySwitchOp(strategy, keyTargets, defaultTarget, key, temp);
    }

    @Override
    public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
        // a temp is needed for loading object constants
        boolean needsTemp = !LIRKind.isValue(key);
        append(createStrategySwitchOp(strategy, keyTargets, defaultTarget, key, needsTemp ? newVariable(key.getValueKind()) : Value.ILLEGAL));
    }

    @Override
    protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
        append(new TableSwitchOp(lowKey, defaultTarget, targets, key, newVariable(LIRKind.value(target().arch.getWordKind())), newVariable(key.getValueKind())));
    }

    @Override
    protected Optional<Hasher> hasherFor(JavaConstant[] keyConstants, double minDensity) {
        return Hasher.forKeys(keyConstants, minDensity);
    }

    @Override
    protected void emitHashTableSwitch(Hasher hasher, JavaConstant[] keys, LabelRef defaultTarget, LabelRef[] targets, Value value) {
        Value index = hasher.hash(value, arithmeticLIRGen);
        Variable scratch = newVariable(LIRKind.value(target().arch.getWordKind()));
        Variable entryScratch = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new HashTableSwitchOp(keys, defaultTarget, targets, value, index, scratch, entryScratch));
    }

    @Override
    public void emitPause() {
        append(new AMD64PauseOp());
    }

    @Override
    public SaveRegistersOp createZapRegisters(Register[] zappedRegisters, JavaConstant[] zapValues) {
        return new AMD64ZapRegistersOp(zappedRegisters, zapValues);
    }

    @Override
    public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
        return new AMD64ZapStackOp(zappedStack, zapValues);
    }

    @Override
    public void emitSpeculationFence() {
        append(new AMD64LFenceOp());
    }
}