/*
 * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package org.graalvm.compiler.core.amd64;

import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.DWORD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PS;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.QWORD;
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.isIntConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;

import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
import org.graalvm.compiler.core.common.spi.LIRKindTool;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.ConstantValue;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.StandardOp.JumpOp;
import org.graalvm.compiler.lir.StandardOp.SaveRegistersOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
import org.graalvm.compiler.lir.amd64.AMD64Binary;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Call;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondSetOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondSetOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64Move;
import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
import org.graalvm.compiler.lir.amd64.AMD64StringIndexOfOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
import org.graalvm.compiler.lir.gen.LIRGenerator;
import org.graalvm.compiler.phases.util.Providers;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.VMConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;

/**
 * This class implements the AMD64 specific portion of the LIR generator.
 */
public abstract class AMD64LIRGenerator extends LIRGenerator {

    public AMD64LIRGenerator(LIRKindTool lirKindTool, AMD64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
    }

    /**
     * Checks whether the supplied constant can be used without loading it into a register for
     * store operations, i.e., on the right hand side of a memory access.
     *
     * @param c The constant to check.
     * @return True if the constant can be used directly, false if the constant needs to be in a
     *         register.
     */
    protected static final boolean canStoreConstant(JavaConstant c) {
        // there is no immediate move of 64-bit constants on Intel
        switch (c.getJavaKind()) {
            case Long:
                return NumUtil.isInt(c.asLong());
            case Double:
                return false;
            case Object:
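                // only null can be encoded as an immediate store; any other object reference
                // would need GC-aware relocation, so it has to go through a register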
                return c.isNull();
            default:
                return true;
        }
    }

    @Override
    protected JavaConstant zapValueForKind(PlatformKind kind) {
        long dead = 0xDEADDEADDEADDEADL;
        switch ((AMD64Kind) kind) {
            case BYTE:
                return JavaConstant.forByte((byte) dead);
            case WORD:
                return JavaConstant.forShort((short) dead);
            case DWORD:
                return JavaConstant.forInt((int) dead);
            case QWORD:
                return JavaConstant.forLong(dead);
            case SINGLE:
                return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
            default:
                // we don't support vector types, so just zap with double for all of them
                return JavaConstant.forDouble(Double.longBitsToDouble(dead));
        }
    }

    public AMD64AddressValue asAddressValue(Value address) {
        if (address instanceof AMD64AddressValue) {
            return (AMD64AddressValue) address;
        } else {
            if (address instanceof JavaConstant) {
                long displacement = ((JavaConstant) address).asLong();
                if (NumUtil.isInt(displacement)) {
                    return new AMD64AddressValue(address.getValueKind(), Value.ILLEGAL, (int) displacement);
                }
            }
            return new AMD64AddressValue(address.getValueKind(), asAllocatable(address), 0);
        }
    }

    @Override
    public Variable emitAddress(AllocatableValue stackslot) {
        Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new StackLeaOp(result, stackslot));
        return result;
    }

    /**
     * The AMD64 backend only uses DWORD and QWORD values in registers because of a performance
     * penalty when accessing WORD or BYTE registers. This function converts small integer kinds
     * to DWORD.
     */
    @Override
    public <K extends ValueKind<K>> K toRegisterKind(K kind) {
        switch ((AMD64Kind) kind.getPlatformKind()) {
            case BYTE:
            case WORD:
                return kind.changeType(AMD64Kind.DWORD);
            default:
                return kind;
        }
    }
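
    /*
     * Note: CMPXCHG implicitly compares against rax and leaves the memory's previous value in
     * rax, which is why the expected value is moved into that register first.
     */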
    @Override
    public Variable emitLogicCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        ValueKind<?> kind = newValue.getValueKind();
        assert kind.equals(expectedValue.getValueKind());
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();

        AMD64AddressValue addressValue = asAddressValue(address);
        RegisterValue raxRes = AMD64.rax.asValue(kind);
        emitMove(raxRes, expectedValue);
        append(new CompareAndSwapOp(memKind, raxRes, addressValue, raxRes, asAllocatable(newValue)));

        assert trueValue.getValueKind().equals(falseValue.getValueKind());
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, asAllocatable(trueValue), falseValue));
        return result;
    }

    @Override
    public Value emitValueCompareAndSwap(Value address, Value expectedValue, Value newValue) {
        ValueKind<?> kind = newValue.getValueKind();
        assert kind.equals(expectedValue.getValueKind());
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();

        AMD64AddressValue addressValue = asAddressValue(address);
        RegisterValue raxRes = AMD64.rax.asValue(kind);
        emitMove(raxRes, expectedValue);
        append(new CompareAndSwapOp(memKind, raxRes, addressValue, raxRes, asAllocatable(newValue)));
        Variable result = newVariable(kind);
        emitMove(result, raxRes);
        return result;
    }

    public void emitCompareAndSwapBranch(ValueKind<?> kind, AMD64AddressValue address, Value expectedValue, Value newValue, Condition condition, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        assert kind.equals(expectedValue.getValueKind());
        assert kind.equals(newValue.getValueKind());
        assert condition == Condition.EQ || condition == Condition.NE;
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();
        RegisterValue raxValue = AMD64.rax.asValue(kind);
        emitMove(raxValue, expectedValue);
        append(new CompareAndSwapOp(memKind, raxValue, address, raxValue, asAllocatable(newValue)));
        append(new BranchOp(condition, trueLabel, falseLabel, trueLabelProbability));
    }

    @Override
    public Value emitAtomicReadAndAdd(Value address, Value delta) {
        ValueKind<?> kind = delta.getValueKind();
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndAddOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(delta)));
        return result;
    }

    @Override
    public Value emitAtomicReadAndWrite(Value address, Value newValue) {
        ValueKind<?> kind = newValue.getValueKind();
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndWriteOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(newValue)));
        return result;
    }

    @Override
    public void emitNullCheck(Value address, LIRFrameState state) {
        append(new AMD64Move.NullCheckOp(asAddressValue(address), state));
    }

    @Override
    public void emitJump(LabelRef label) {
        assert label != null;
        append(new JumpOp(label));
    }
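
    /*
     * Floating-point compares set the parity flag for unordered operands, so they branch via
     * FloatBranchOp, which knows how to honor unorderedIsTrue; integer compares use a plain
     * BranchOp.
     */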
    @Override
    public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
        Condition finalCondition = emitCompare(cmpKind, left, right, cond);
        if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        if (cmpKind.isXMM()) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    @Override
    public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpLIRKind, double overflowProbability) {
        append(new BranchOp(ConditionFlag.Overflow, overflow, noOverflow, overflowProbability));
    }

    @Override
    public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
        emitIntegerTest(left, right);
        append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
    }

    @Override
    public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
        boolean isFloatComparison = cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE;

        Condition finalCondition = cond;
        Value finalTrueValue = trueValue;
        Value finalFalseValue = falseValue;
        if (isFloatComparison) {
            // eliminate the parity check in case of a float comparison
            Value finalLeft = left;
            Value finalRight = right;
            if (unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition)) {
                if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.mirror())) {
                    finalCondition = finalCondition.mirror();
                    finalLeft = right;
                    finalRight = left;
                } else if (finalCondition != Condition.EQ && finalCondition != Condition.NE) {
                    // negating EQ and NE does not make any sense as we would need to negate
                    // unorderedIsTrue as well (otherwise, we would no longer fulfill the Java
                    // NaN semantics)
                    assert unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate());
                    finalCondition = finalCondition.negate();
                    finalTrueValue = falseValue;
                    finalFalseValue = trueValue;
                }
            }
            emitRawCompare(cmpKind, finalLeft, finalRight);
        } else {
            finalCondition = emitCompare(cmpKind, left, right, cond);
        }

        boolean isParityCheckNecessary = isFloatComparison && unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition);
        Variable result = newVariable(finalTrueValue.getValueKind());
        if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 1) && isIntConstant(finalFalseValue, 0)) {
            if (isFloatComparison) {
                append(new FloatCondSetOp(result, finalCondition));
            } else {
                append(new CondSetOp(result, finalCondition));
            }
        } else if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 0) && isIntConstant(finalFalseValue, 1)) {
            if (isFloatComparison) {
                if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate())) {
                    append(new FloatCondSetOp(result, finalCondition.negate()));
                } else {
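                    // the set instruction materializes 0 or 1, so XOR-ing the result with 1
                    // negates it when the condition itself cannot safely be negated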
                    append(new FloatCondSetOp(result, finalCondition));
                    Variable negatedResult = newVariable(result.getValueKind());
                    append(new AMD64Binary.ConstOp(AMD64BinaryArithmetic.XOR, OperandSize.get(result.getPlatformKind()), negatedResult, result, 1));
                    result = negatedResult;
                }
            } else {
                append(new CondSetOp(result, finalCondition.negate()));
            }
        } else if (isFloatComparison) {
            append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(finalTrueValue), load(finalFalseValue)));
        } else {
            append(new CondMoveOp(result, finalCondition, load(finalTrueValue), loadNonConst(finalFalseValue)));
        }
        return result;
    }

    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        emitIntegerTest(left, right);
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
        return result;
    }
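
    /*
     * TEST only takes a 32-bit immediate, so a constant operand is folded into the instruction
     * when it fits; otherwise the register/register form is emitted.
     */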
    private void emitIntegerTest(Value a, Value b) {
        assert ((AMD64Kind) a.getPlatformKind()).isInteger();
        OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
        if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
        } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
        } else if (isAllocatableValue(b)) {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
        } else {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
        }
    }

    /**
     * This method emits the compare against memory instruction, and may reorder the operands. It
     * returns true if it did so.
     *
     * @param a the left operand of the comparison
     * @param b the right operand (the memory address) of the comparison
     * @return true if the left and right operands were switched, false otherwise
     */
    private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
        OperandSize size;
        switch (cmpKind) {
            case BYTE:
                size = OperandSize.BYTE;
                break;
            case WORD:
                size = OperandSize.WORD;
                break;
            case DWORD:
                size = OperandSize.DWORD;
                break;
            case QWORD:
                size = OperandSize.QWORD;
                break;
            case SINGLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PS, asAllocatable(a), b, state));
                return false;
            case DOUBLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PD, asAllocatable(a), b, state));
                return false;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
        }

        if (isConstantValue(a)) {
            return emitCompareMemoryConOp(size, asConstantValue(a), b, state);
        } else {
            return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
        }
    }

    protected boolean emitCompareMemoryConOp(OperandSize size, ConstantValue a, AMD64AddressValue b, LIRFrameState state) {
        if (JavaConstant.isNull(a.getConstant())) {
            append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, 0, state));
            return true;
        } else if (a.getConstant() instanceof VMConstant && size == DWORD) {
            VMConstant vc = (VMConstant) a.getConstant();
            append(new AMD64BinaryConsumer.MemoryVMConstOp(CMP.getMIOpcode(size, false), b, vc, state));
            return true;
        } else {
            long value = a.getJavaConstant().asLong();
            if (NumUtil.is32bit(value)) {
                append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
                return true;
            } else {
                return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
            }
        }
    }

    private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
        AMD64RMOp op = CMP.getRMOpcode(size);
        append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
        return false;
    }

    /**
     * This method emits the compare instruction, and may reorder the operands.
     *
     * @param a the left operand of the comparison
     * @param b the right operand of the comparison
     * @param cond the condition of the comparison
     * @return the condition to evaluate, mirrored if the operands were switched
     */
    private Condition emitCompare(PlatformKind cmpKind, Value a, Value b, Condition cond) {
        if (LIRValueUtil.isVariable(b)) {
            emitRawCompare(cmpKind, b, a);
            return cond.mirror();
        } else {
            emitRawCompare(cmpKind, a, b);
            return cond;
        }
    }

    private void emitRawCompare(PlatformKind cmpKind, Value left, Value right) {
        ((AMD64ArithmeticLIRGeneratorTool) arithmeticLIRGen).emitCompareOp((AMD64Kind) cmpKind, load(left), loadNonConst(right));
    }

    @Override
    public void emitMembar(int barriers) {
        int necessaryBarriers = target().arch.requiredBarriers(barriers);
        if (target().isMP && necessaryBarriers != 0) {
            append(new MembarOp(necessaryBarriers));
        }
    }

    public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args, int numberOfFloatingPointArguments);
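
    /*
     * A direct near call encodes a rel32 displacement, so a target that may lie more than 2^31
     * bytes away needs the far-call sequence; with GeneratePIC the near form is used
     * unconditionally.
     */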
    @Override
    protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
        long maxOffset = linkage.getMaxCallTargetOffset();
        if (maxOffset != (int) maxOffset && !GeneratePIC.getValue(getResult().getLIR().getOptions())) {
            append(new AMD64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
        } else {
            append(new AMD64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
        }
    }

    @Override
    public Variable emitByteSwap(Value input) {
        Variable result = newVariable(LIRKind.combine(input));
        append(new AMD64ByteSwapOp(result, input));
        return result;
    }

    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind, result, array1, array2, asAllocatable(length)));
        return result;
    }

    /**
     * Return a conservative estimate of the page size for use by the String.indexOf intrinsic.
     */
    protected int getVMPageSize() {
        return 4096;
    }
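
    /*
     * The String.indexOf stub expects its counts in fixed registers: rdx holds the source count
     * and rax the target count, with rcx and xmm0 used as temporaries.
     */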
    @Override
    public Variable emitStringIndexOf(Value source, Value sourceCount, Value target, Value targetCount, int constantTargetCount) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        RegisterValue cnt1 = AMD64.rdx.asValue(sourceCount.getValueKind());
        emitMove(cnt1, sourceCount);
        RegisterValue cnt2 = AMD64.rax.asValue(targetCount.getValueKind());
        emitMove(cnt2, targetCount);
        append(new AMD64StringIndexOfOp(this, result, source, target, cnt1, cnt2, AMD64.rcx.asValue(), AMD64.xmm0.asValue(), constantTargetCount, getVMPageSize()));
        return result;
    }

    @Override
    public void emitReturn(JavaKind kind, Value input) {
        AllocatableValue operand = Value.ILLEGAL;
        if (input != null) {
            operand = resultOperandFor(kind, input.getValueKind());
            emitMove(operand, input);
        }
        append(new ReturnOp(operand));
    }

    protected StrategySwitchOp createStrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Variable key, AllocatableValue temp) {
        return new StrategySwitchOp(strategy, keyTargets, defaultTarget, key, temp);
    }

    @Override
    public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
        // a temp is needed for loading object constants
        boolean needsTemp = !LIRKind.isValue(key);
        append(createStrategySwitchOp(strategy, keyTargets, defaultTarget, key, needsTemp ? newVariable(key.getValueKind()) : Value.ILLEGAL));
    }

    @Override
    protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
        append(new TableSwitchOp(lowKey, defaultTarget, targets, key, newVariable(LIRKind.value(target().arch.getWordKind())), newVariable(key.getValueKind())));
    }

    @Override
    public void emitPause() {
        append(new AMD64PauseOp());
    }

    @Override
    public SaveRegistersOp createZapRegisters(Register[] zappedRegisters, JavaConstant[] zapValues) {
        return new AMD64ZapRegistersOp(zappedRegisters, zapValues);
    }

    @Override
    public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
        return new AMD64ZapStackOp(zappedStack, zapValues);
    }
}