/*
 * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package org.graalvm.compiler.core.amd64;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.lir.LIRValueUtil.asConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.isIntConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;

import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
import org.graalvm.compiler.core.common.spi.LIRKindTool;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.ConstantValue;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.StandardOp.JumpOp;
import org.graalvm.compiler.lir.StandardOp.SaveRegistersOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
import org.graalvm.compiler.lir.amd64.AMD64ArrayCompareToOp;
import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
import org.graalvm.compiler.lir.amd64.AMD64ArrayIndexOfOp;
import org.graalvm.compiler.lir.amd64.AMD64Binary;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Call;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondSetOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondSetOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64LFenceOp;
import org.graalvm.compiler.lir.amd64.AMD64Move;
import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
import org.graalvm.compiler.lir.amd64.AMD64StringIndexOfOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
import org.graalvm.compiler.lir.gen.LIRGenerator;
import org.graalvm.compiler.lir.gen.LIRGeneratorTool.MoveFactory;
import org.graalvm.compiler.phases.util.Providers;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.VMConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;

/**
 * This class implements the AMD64 specific portion of the LIR generator.
 */
public abstract class AMD64LIRGenerator extends LIRGenerator {

    public AMD64LIRGenerator(LIRKindTool lirKindTool, AMD64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
    }

    /**
     * Checks whether the supplied constant can be used without loading it into a register for
     * store operations, i.e., on the right hand side of a memory access.
     *
     * @param c The constant to check.
     * @return True if the constant can be used directly, false if the constant needs to be in a
     *         register.
     */
    protected static final boolean canStoreConstant(JavaConstant c) {
        // there is no immediate move of 64-bit constants on Intel
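        // (a QWORD store can encode at most a sign-extended 32-bit immediate via MOV r/m64, imm32,
        // so a Long constant only qualifies if it fits into an int; Double has no immediate store
        // form at all, and of the Object constants only null can be stored directly, as 0)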
        switch (c.getJavaKind()) {
            case Long:
                return NumUtil.isInt(c.asLong());
            case Double:
                return false;
            case Object:
                return c.isNull();
            default:
                return true;
        }
    }

    @Override
    protected JavaConstant zapValueForKind(PlatformKind kind) {
        long dead = 0xDEADDEADDEADDEADL;
        switch ((AMD64Kind) kind) {
            case BYTE:
                return JavaConstant.forByte((byte) dead);
            case WORD:
                return JavaConstant.forShort((short) dead);
            case DWORD:
                return JavaConstant.forInt((int) dead);
            case QWORD:
                return JavaConstant.forLong(dead);
            case SINGLE:
                return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
            default:
                // we don't support vector types, so just zap with double for all of them
                return JavaConstant.forDouble(Double.longBitsToDouble(dead));
        }
    }
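    /**
     * Wraps an arbitrary address {@link Value} into an {@link AMD64AddressValue}. A constant
     * address that fits into a signed 32-bit displacement is encoded as an absolute address with
     * no base register; everything else is made allocatable and used as the base.
     */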
    public AMD64AddressValue asAddressValue(Value address) {
        if (address instanceof AMD64AddressValue) {
            return (AMD64AddressValue) address;
        } else {
            if (address instanceof JavaConstant) {
                long displacement = ((JavaConstant) address).asLong();
                if (NumUtil.isInt(displacement)) {
                    return new AMD64AddressValue(address.getValueKind(), Value.ILLEGAL, (int) displacement);
                }
            }
            return new AMD64AddressValue(address.getValueKind(), asAllocatable(address), 0);
        }
    }

    @Override
    public Variable emitAddress(AllocatableValue stackslot) {
        Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new StackLeaOp(result, stackslot));
        return result;
    }

    /**
     * The AMD64 backend only uses DWORD and QWORD values in registers because of a performance
     * penalty when accessing WORD or BYTE registers. This function converts small integer kinds to
     * DWORD.
     */
    @Override
    public <K extends ValueKind<K>> K toRegisterKind(K kind) {
        switch ((AMD64Kind) kind.getPlatformKind()) {
            case BYTE:
            case WORD:
                return kind.changeType(AMD64Kind.DWORD);
            default:
                return kind;
        }
    }

    private AllocatableValue asAllocatable(Value value, ValueKind<?> kind) {
        if (value.getValueKind().equals(kind)) {
            return asAllocatable(value);
        } else if (isRegister(value)) {
            return asRegister(value).asValue(kind);
        } else if (isConstantValue(value)) {
            return emitLoadConstant(kind, asConstant(value));
        } else {
            Variable variable = newVariable(kind);
            emitMove(variable, value);
            return variable;
        }
    }
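    /*
     * CMPXCHG implicitly uses RAX: the expected value must be in RAX before the instruction, and
     * RAX receives the value that was actually found in memory. On success ZF is set, which the
     * logic variant below consumes via a conditional move on Condition.EQ. Since CMPXCHG only
     * operates on general purpose registers, XMM values are first reinterpreted to an integer
     * kind of the same size and reinterpreted back afterwards.
     */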
    private Value emitCompareAndSwap(boolean isLogic, LIRKind accessKind, Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        ValueKind<?> kind = newValue.getValueKind();
        assert kind.equals(expectedValue.getValueKind());

        AMD64AddressValue addressValue = asAddressValue(address);
        LIRKind integralAccessKind = accessKind;
        Value reinterpretedExpectedValue = expectedValue;
        Value reinterpretedNewValue = newValue;
        boolean isXmm = ((AMD64Kind) accessKind.getPlatformKind()).isXMM();
        if (isXmm) {
            if (accessKind.getPlatformKind().equals(AMD64Kind.SINGLE)) {
                integralAccessKind = LIRKind.fromJavaKind(target().arch, JavaKind.Int);
            } else {
                integralAccessKind = LIRKind.fromJavaKind(target().arch, JavaKind.Long);
            }
            reinterpretedExpectedValue = arithmeticLIRGen.emitReinterpret(integralAccessKind, expectedValue);
            reinterpretedNewValue = arithmeticLIRGen.emitReinterpret(integralAccessKind, newValue);
        }
        AMD64Kind memKind = (AMD64Kind) integralAccessKind.getPlatformKind();
        RegisterValue aRes = AMD64.rax.asValue(integralAccessKind);
        AllocatableValue allocatableNewValue = asAllocatable(reinterpretedNewValue, integralAccessKind);
        emitMove(aRes, reinterpretedExpectedValue);
        append(new CompareAndSwapOp(memKind, aRes, addressValue, aRes, allocatableNewValue));

        if (isLogic) {
            assert trueValue.getValueKind().equals(falseValue.getValueKind());
            Variable result = newVariable(trueValue.getValueKind());
            append(new CondMoveOp(result, Condition.EQ, asAllocatable(trueValue), falseValue));
            return result;
        } else {
            if (isXmm) {
                return arithmeticLIRGen.emitReinterpret(accessKind, aRes);
            } else {
                Variable result = newVariable(kind);
                emitMove(result, aRes);
                return result;
            }
        }
    }

    @Override
    public Variable emitLogicCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        return (Variable) emitCompareAndSwap(true, accessKind, address, expectedValue, newValue, trueValue, falseValue);
    }

    @Override
    public Value emitValueCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue) {
        return emitCompareAndSwap(false, accessKind, address, expectedValue, newValue, null, null);
    }

    public void emitCompareAndSwapBranch(ValueKind<?> kind, AMD64AddressValue address, Value expectedValue, Value newValue, Condition condition, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        assert kind.equals(expectedValue.getValueKind());
        assert kind.equals(newValue.getValueKind());
        assert condition == Condition.EQ || condition == Condition.NE;
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();
        RegisterValue raxValue = AMD64.rax.asValue(kind);
        emitMove(raxValue, expectedValue);
        append(new CompareAndSwapOp(memKind, raxValue, address, raxValue, asAllocatable(newValue)));
        append(new BranchOp(condition, trueLabel, falseLabel, trueLabelProbability));
    }

    @Override
    public Value emitAtomicReadAndAdd(Value address, ValueKind<?> kind, Value delta) {
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndAddOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(delta)));
        return result;
    }

    @Override
    public Value emitAtomicReadAndWrite(Value address, ValueKind<?> kind, Value newValue) {
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndWriteOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(newValue)));
        return result;
    }

    @Override
    public void emitNullCheck(Value address, LIRFrameState state) {
        append(new AMD64Move.NullCheckOp(asAddressValue(address), state));
    }

    @Override
    public void emitJump(LabelRef label) {
        assert label != null;
        append(new JumpOp(label));
    }

    @Override
    public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
        Condition finalCondition = emitCompare(cmpKind, left, right, cond);
        if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
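        // emitCompareMemory encodes the comparison with the memory operand on the left when the
        // other operand is a constant; it reports true in that case, and the condition has to be
        // mirrored to preserve the original meaning of the comparison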
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        if (cmpKind.isXMM()) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    @Override
    public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpLIRKind, double overflowProbability) {
        append(new BranchOp(ConditionFlag.Overflow, overflow, noOverflow, overflowProbability));
    }

    @Override
    public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
        emitIntegerTest(left, right);
        append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
    }

    @Override
    public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
        boolean isFloatComparison = cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE;

        Condition finalCondition = cond;
        Value finalTrueValue = trueValue;
        Value finalFalseValue = falseValue;
        if (isFloatComparison) {
            // eliminate the parity check in case of a float comparison
            Value finalLeft = left;
            Value finalRight = right;
            if (unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition)) {
                if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.mirror())) {
                    finalCondition = finalCondition.mirror();
                    finalLeft = right;
                    finalRight = left;
                } else if (finalCondition != Condition.EQ && finalCondition != Condition.NE) {
                    // negating EQ and NE does not make any sense as we would need to negate
                    // unorderedIsTrue as well (otherwise, we would no longer fulfill the Java
                    // NaN semantics)
                    assert unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate());
                    finalCondition = finalCondition.negate();
                    finalTrueValue = falseValue;
                    finalFalseValue = trueValue;
                }
            }
            emitRawCompare(cmpKind, finalLeft, finalRight);
        } else {
            finalCondition = emitCompare(cmpKind, left, right, cond);
        }

        boolean isParityCheckNecessary = isFloatComparison && unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition);
        Variable result = newVariable(finalTrueValue.getValueKind());
        if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 1) && isIntConstant(finalFalseValue, 0)) {
            if (isFloatComparison) {
                append(new FloatCondSetOp(result, finalCondition));
            } else {
                append(new CondSetOp(result, finalCondition));
            }
        } else if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 0) && isIntConstant(finalFalseValue, 1)) {
            if (isFloatComparison) {
                if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate())) {
                    append(new FloatCondSetOp(result, finalCondition.negate()));
                } else {
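                    // negating the condition here would change the unordered (NaN) behavior, so
                    // materialize the un-negated 0/1 result and flip its lowest bit with XOR 1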
                    append(new FloatCondSetOp(result, finalCondition));
                    Variable negatedResult = newVariable(result.getValueKind());
                    append(new AMD64Binary.ConstOp(AMD64BinaryArithmetic.XOR, OperandSize.get(result.getPlatformKind()), negatedResult, result, 1));
                    result = negatedResult;
                }
            } else {
                append(new CondSetOp(result, finalCondition.negate()));
            }
        } else if (isFloatComparison) {
            append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(finalTrueValue), load(finalFalseValue)));
        } else {
            append(new CondMoveOp(result, finalCondition, load(finalTrueValue), loadNonConst(finalFalseValue)));
        }
        return result;
    }

    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        emitIntegerTest(left, right);
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
        return result;
    }
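    /*
     * TEST performs a bitwise AND of its operands and sets the flags without writing a result, so
     * Condition.EQ after this method means (a & b) == 0. The immediate form of TEST only takes a
     * 32-bit value, hence the is32bit checks before folding a constant operand into the
     * instruction.
     */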
    private void emitIntegerTest(Value a, Value b) {
        assert ((AMD64Kind) a.getPlatformKind()).isInteger();
        OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
        if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
        } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
        } else if (isAllocatableValue(b)) {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
        } else {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
        }
    }

    /**
     * This method emits the compare against memory instruction, and may reorder the operands. It
     * returns true if it did so.
     *
     * @param b the right operand of the comparison
     * @return true if the left and right operands were switched, false otherwise
     */
    private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
        OperandSize size;
        switch (cmpKind) {
            case BYTE:
                size = OperandSize.BYTE;
                break;
            case WORD:
                size = OperandSize.WORD;
                break;
            case DWORD:
                size = OperandSize.DWORD;
                break;
            case QWORD:
                size = OperandSize.QWORD;
                break;
            case SINGLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PS, asAllocatable(a), b, state));
                return false;
            case DOUBLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PD, asAllocatable(a), b, state));
                return false;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
        }

        if (isConstantValue(a)) {
            return emitCompareMemoryConOp(size, asConstantValue(a), b, state);
        } else {
            return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
        }
    }

    protected boolean emitCompareMemoryConOp(OperandSize size, ConstantValue a, AMD64AddressValue b, LIRFrameState state) {
        if (JavaConstant.isNull(a.getConstant())) {
            append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, 0, state));
            return true;
        } else if (a.getConstant() instanceof VMConstant && size == DWORD) {
            VMConstant vc = (VMConstant) a.getConstant();
            append(new AMD64BinaryConsumer.MemoryVMConstOp(CMP.getMIOpcode(size, false), b, vc, state));
            return true;
        } else {
            long value = a.getJavaConstant().asLong();
            if (NumUtil.is32bit(value)) {
                append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
                return true;
            } else {
                return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
            }
        }
    }

    private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
        AMD64RMOp op = CMP.getRMOpcode(size);
        append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
        return false;
    }

    /**
     * This method emits the compare instruction, and may mirror the operands so that a variable
     * ends up on the left hand side.
     *
     * @param a the left operand of the comparison
     * @param b the right operand of the comparison
     * @param cond the condition of the comparison
     * @return the condition to use for consuming the flags: {@code cond} itself, or its mirrored
     *         form if the operands were switched
     */
    private Condition emitCompare(PlatformKind cmpKind, Value a, Value b, Condition cond) {
        if (LIRValueUtil.isVariable(b)) {
            emitRawCompare(cmpKind, b, a);
            return cond.mirror();
        } else {
            emitRawCompare(cmpKind, a, b);
            return cond;
        }
    }

    private void emitRawCompare(PlatformKind cmpKind, Value left, Value right) {
        ((AMD64ArithmeticLIRGeneratorTool) arithmeticLIRGen).emitCompareOp((AMD64Kind) cmpKind, load(left), loadNonConst(right));
    }

    @Override
    public void emitMembar(int barriers) {
        int necessaryBarriers = target().arch.requiredBarriers(barriers);
        if (target().isMP && necessaryBarriers != 0) {
            append(new MembarOp(necessaryBarriers));
        }
    }

    public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args, int numberOfFloatingPointArguments);

    @Override
    protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
        long maxOffset = linkage.getMaxCallTargetOffset();
        // a near call encodes the target as a rel32 displacement; if the target might be out of
        // range, or if the code is position independent, a far call through a register is needed
        if (maxOffset != (int) maxOffset || GeneratePIC.getValue(getResult().getLIR().getOptions())) {
            append(new AMD64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
        } else {
            append(new AMD64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
        }
    }

    @Override
    public Variable emitByteSwap(Value input) {
        Variable result = newVariable(LIRKind.combine(input));
        append(new AMD64ByteSwapOp(result, input));
        return result;
    }

    @Override
    public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
        LIRKind resultKind = LIRKind.value(AMD64Kind.DWORD);
        RegisterValue raxRes = AMD64.rax.asValue(resultKind);
        RegisterValue cnt1 = AMD64.rcx.asValue(length1.getValueKind());
        RegisterValue cnt2 = AMD64.rdx.asValue(length2.getValueKind());
        emitMove(cnt1, length1);
        emitMove(cnt2, length2);
        append(new AMD64ArrayCompareToOp(this, kind1, kind2, raxRes, array1, array2, cnt1, cnt2));
        Variable result = newVariable(resultKind);
        emitMove(result, raxRes);
        return result;
    }

    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind, result, array1, array2, asAllocatable(length)));
        return result;
    }

    /**
     * Return a conservative estimate of the page size for use by the String.indexOf intrinsic.
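     * The intrinsified search loops may read ahead of the last valid character; such over-reads
     * are assumed to be safe as long as they do not cross a page boundary, and 4096 is a
     * conservative choice because x86-64 page sizes are multiples of 4 KiB.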
     */
    protected int getVMPageSize() {
        return 4096;
    }

    @Override
    public Variable emitStringIndexOf(Value source, Value sourceCount, Value target, Value targetCount, int constantTargetCount) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        RegisterValue cnt1 = AMD64.rdx.asValue(sourceCount.getValueKind());
        emitMove(cnt1, sourceCount);
        RegisterValue cnt2 = AMD64.rax.asValue(targetCount.getValueKind());
        emitMove(cnt2, targetCount);
        append(new AMD64StringIndexOfOp(this, result, source, target, cnt1, cnt2, AMD64.rcx.asValue(), AMD64.xmm0.asValue(), constantTargetCount, getVMPageSize()));
        return result;
    }

    @Override
    public Variable emitArrayIndexOf(JavaKind kind, Value arrayPointer, Value arrayLength, Value charValue) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayIndexOfOp(kind, getVMPageSize(), this, result, asAllocatable(arrayPointer), asAllocatable(arrayLength), asAllocatable(charValue)));
        return result;
    }

    @Override
    public void emitReturn(JavaKind kind, Value input) {
        AllocatableValue operand = Value.ILLEGAL;
        if (input != null) {
            operand = resultOperandFor(kind, input.getValueKind());
            emitMove(operand, input);
        }
        append(new ReturnOp(operand));
    }

    protected StrategySwitchOp createStrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Variable key, AllocatableValue temp) {
        return new StrategySwitchOp(strategy, keyTargets, defaultTarget, key, temp);
    }

    @Override
    public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
        // a temp is needed for loading object constants
        boolean needsTemp = !LIRKind.isValue(key);
        append(createStrategySwitchOp(strategy, keyTargets, defaultTarget, key, needsTemp ? newVariable(key.getValueKind()) : Value.ILLEGAL));
    }

    @Override
    protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
        append(new TableSwitchOp(lowKey, defaultTarget, targets, key, newVariable(LIRKind.value(target().arch.getWordKind())), newVariable(key.getValueKind())));
    }

    @Override
    public void emitPause() {
        append(new AMD64PauseOp());
    }

    @Override
    public SaveRegistersOp createZapRegisters(Register[] zappedRegisters, JavaConstant[] zapValues) {
        return new AMD64ZapRegistersOp(zappedRegisters, zapValues);
    }

    @Override
    public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
        return new AMD64ZapStackOp(zappedStack, zapValues);
    }

    public void emitLFence() {
        append(new AMD64LFenceOp());
    }
}