1 /* 2 * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 */ 23 24 package org.graalvm.compiler.core.amd64; 25 26 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP; 27 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.DWORD; 28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PD; 29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PS; 30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.QWORD; 31 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC; 32 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue; 33 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant; 34 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant; 35 import static jdk.vm.ci.code.ValueUtil.isAllocatableValue; 36 37 import org.graalvm.compiler.asm.NumUtil; 38 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp; 39 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp; 40 import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag; 41 import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize; 42 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; 43 import org.graalvm.compiler.core.common.LIRKind; 44 import org.graalvm.compiler.core.common.calc.Condition; 45 import org.graalvm.compiler.core.common.spi.ForeignCallLinkage; 46 import org.graalvm.compiler.core.common.spi.LIRKindTool; 47 import org.graalvm.compiler.debug.GraalError; 48 import org.graalvm.compiler.lir.ConstantValue; 49 import org.graalvm.compiler.lir.LIRFrameState; 50 import org.graalvm.compiler.lir.LIRInstruction; 51 import org.graalvm.compiler.lir.LIRValueUtil; 52 import org.graalvm.compiler.lir.LabelRef; 53 import org.graalvm.compiler.lir.StandardOp.JumpOp; 54 import org.graalvm.compiler.lir.StandardOp.SaveRegistersOp; 55 import org.graalvm.compiler.lir.SwitchStrategy; 56 import org.graalvm.compiler.lir.Variable; 57 import 
org.graalvm.compiler.lir.amd64.AMD64AddressValue; 58 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool; 59 import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp; 60 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer; 61 import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp; 62 import org.graalvm.compiler.lir.amd64.AMD64Call; 63 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp; 64 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp; 65 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp; 66 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp; 67 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp; 68 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp; 69 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp; 70 import org.graalvm.compiler.lir.amd64.AMD64Move; 71 import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp; 72 import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp; 73 import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp; 74 import org.graalvm.compiler.lir.amd64.AMD64PauseOp; 75 import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp; 76 import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp; 77 import org.graalvm.compiler.lir.gen.LIRGenerationResult; 78 import org.graalvm.compiler.lir.gen.LIRGenerator; 79 import org.graalvm.compiler.phases.util.Providers; 80 81 import jdk.vm.ci.amd64.AMD64; 82 import jdk.vm.ci.amd64.AMD64Kind; 83 import jdk.vm.ci.code.CallingConvention; 84 import jdk.vm.ci.code.Register; 85 import jdk.vm.ci.code.RegisterValue; 86 import jdk.vm.ci.code.StackSlot; 87 import jdk.vm.ci.meta.AllocatableValue; 88 import jdk.vm.ci.meta.JavaConstant; 89 import jdk.vm.ci.meta.JavaKind; 90 import jdk.vm.ci.meta.PlatformKind; 91 import jdk.vm.ci.meta.VMConstant; 92 import jdk.vm.ci.meta.Value; 93 import jdk.vm.ci.meta.ValueKind; 94 95 /** 96 * This class implements the AMD64 specific portion 
 * of the LIR generator.
 */
public abstract class AMD64LIRGenerator extends LIRGenerator {

    public AMD64LIRGenerator(LIRKindTool lirKindTool, AMD64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
    }

    /**
     * Checks whether the supplied constant can be used without loading it into a register for store
     * operations, i.e., on the right hand side of a memory access.
     *
     * @param c The constant to check.
     * @return True if the constant can be used directly, false if the constant needs to be in a
     *         register.
     */
    protected static final boolean canStoreConstant(JavaConstant c) {
        // there is no immediate move of 64-bit constants on Intel
        switch (c.getJavaKind()) {
            case Long: {
                // a long constant is storable only if it fits in a sign-extended 32-bit immediate
                long l = c.asLong();
                return (int) l == l;
            }
            case Double:
                return false;
            case Object:
                // only the null object constant can be encoded as an immediate
                return c.isNull();
            default:
                return true;
        }
    }

    /**
     * Produces a constant of the requested platform kind carrying the recognizable
     * {@code 0xDEADDEADDEADDEAD} bit pattern (truncated to the kind's width), used to zap
     * registers and stack slots so that use of stale values is easy to spot.
     */
    @Override
    protected JavaConstant zapValueForKind(PlatformKind kind) {
        long dead = 0xDEADDEADDEADDEADL;
        switch ((AMD64Kind) kind) {
            case BYTE:
                return JavaConstant.forByte((byte) dead);
            case WORD:
                return JavaConstant.forShort((short) dead);
            case DWORD:
                return JavaConstant.forInt((int) dead);
            case QWORD:
                return JavaConstant.forLong(dead);
            case SINGLE:
                return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
            default:
                // we don't support vector types, so just zap with double for all of them
                return JavaConstant.forDouble(Double.longBitsToDouble(dead));
        }
    }

    /**
     * Converts an arbitrary address {@link Value} into an {@link AMD64AddressValue}. A constant
     * address that fits in a signed 32-bit displacement is encoded as an absolute displacement
     * with no base register; any other value is loaded into a register and used as the base.
     */
    public AMD64AddressValue asAddressValue(Value address) {
        if (address instanceof AMD64AddressValue) {
            return (AMD64AddressValue) address;
        } else {
            if (address instanceof JavaConstant) {
                long displacement = ((JavaConstant) address).asLong();
                if (NumUtil.isInt(displacement)) {
                    return new AMD64AddressValue(address.getValueKind(), Value.ILLEGAL, (int) displacement);
                }
            }
            return new AMD64AddressValue(address.getValueKind(), asAllocatable(address), 0);
        }
    }

    /**
     * Computes the address of the given stack slot into a fresh word-sized variable (via a
     * stack LEA operation).
     */
    @Override
    public Variable emitAddress(AllocatableValue stackslot) {
        Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new StackLeaOp(result, stackslot));
        return result;
    }

    /**
     * The AMD64 backend only uses DWORD and QWORD values in registers because of a performance
     * penalty when accessing WORD or BYTE registers. This function converts small integer kinds to
     * DWORD.
     */
    @Override
    public <K extends ValueKind<K>> K toRegisterKind(K kind) {
        switch ((AMD64Kind) kind.getPlatformKind()) {
            case BYTE:
            case WORD:
                return kind.changeType(AMD64Kind.DWORD);
            default:
                return kind;
        }
    }

    /**
     * Emits a compare-and-swap of {@code newValue} into {@code address} guarded by
     * {@code expectedValue}, yielding {@code trueValue} on success and {@code falseValue} on
     * failure. The expected value (and the op's result) is pinned in RAX, matching the implicit
     * register use of the underlying compare-and-swap instruction; the success condition is read
     * via the EQ flag by the subsequent conditional move.
     */
    @Override
    public Variable emitCompareAndSwap(Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        ValueKind<?> kind = newValue.getValueKind();
        assert kind.equals(expectedValue.getValueKind());
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();

        AMD64AddressValue addressValue = asAddressValue(address);
        RegisterValue raxRes = AMD64.rax.asValue(kind);
        emitMove(raxRes, expectedValue);
        append(new CompareAndSwapOp(memKind, raxRes, addressValue, raxRes, asAllocatable(newValue)));

        assert trueValue.getValueKind().equals(falseValue.getValueKind());
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, asAllocatable(trueValue), falseValue));
        return result;
    }

    /**
     * Atomically adds {@code delta} to the memory location at {@code address} and returns the
     * previous value of that location.
     */
    @Override
    public Value emitAtomicReadAndAdd(Value address, Value delta) {
        ValueKind<?> kind = delta.getValueKind();
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndAddOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(delta)));
        return result;
    }

    /**
     * Atomically stores {@code newValue} to the memory location at {@code address} and returns
     * the previous value of that location.
     */
    @Override
    public Value emitAtomicReadAndWrite(Value address, Value newValue) {
        ValueKind<?> kind = newValue.getValueKind();
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndWriteOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(newValue)));
        return result;
    }

    /** Emits an implicit null check of {@code address}, deoptimizing with {@code state} on a fault. */
    @Override
    public void emitNullCheck(Value address, LIRFrameState state) {
        append(new AMD64Move.NullCheckOp(asAddressValue(address), state));
    }

    /** Emits an unconditional jump to {@code label}. */
    @Override
    public void emitJump(LabelRef label) {
        assert label != null;
        append(new JumpOp(label));
    }

    /**
     * Emits a compare followed by a conditional branch. If {@link #emitCompare} had to swap the
     * operands, the condition is mirrored to compensate. Floating-point kinds use
     * {@link FloatBranchOp}, which additionally honors {@code unorderedIsTrue} for NaN operands.
     */
    @Override
    public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
        boolean mirrored = emitCompare(cmpKind, left, right);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    /**
     * Emits a compare against a memory operand followed by a conditional branch, mirroring the
     * condition if the compare had to swap its operands. An implicit exception edge for the
     * memory access is recorded via {@code state}.
     */
    public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        if (cmpKind.isXMM()) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    /**
     * Emits a branch on the overflow flag set by the immediately preceding arithmetic operation.
     */
    @Override
    public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpLIRKind, double overflowProbability) {
        append(new BranchOp(ConditionFlag.Overflow, overflow, noOverflow, overflowProbability));
    }

    /**
     * Emits a TEST of {@code left} and {@code right} and branches to {@code trueDestination}
     * when the result is zero (EQ).
     */
    @Override
    public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
        emitIntegerTest(left, right);
        append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
    }

    /**
     * Emits a compare followed by a conditional move selecting {@code trueValue} or
     * {@code falseValue}. The condition is mirrored if the compare swapped its operands;
     * floating-point kinds use {@link FloatCondMoveOp} to handle unordered results.
     */
    @Override
    public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
        boolean mirrored = emitCompare(cmpKind, left, right);
        Condition finalCondition = mirrored ? cond.mirror() : cond;

        Variable result = newVariable(trueValue.getValueKind());
        if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
            append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(trueValue), load(falseValue)));
        } else {
            append(new CondMoveOp(result, finalCondition, load(trueValue), loadNonConst(falseValue)));
        }
        return result;
    }

    /**
     * Emits a TEST of {@code left} and {@code right} and conditionally moves {@code trueValue}
     * into the result when the test result is zero (EQ), {@code falseValue} otherwise.
     */
    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        emitIntegerTest(left, right);
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
        return result;
    }

    /**
     * Emits a TEST instruction for {@code a} and {@code b}. The operand size is QWORD only for
     * 64-bit kinds (DWORD otherwise, since small kinds live in DWORD registers). A constant
     * operand that fits in 32 bits is folded into the immediate form; otherwise both operands
     * are forced into registers. TEST is commutative, so operand order does not affect the flags.
     */
    private void emitIntegerTest(Value a, Value b) {
        assert ((AMD64Kind) a.getPlatformKind()).isInteger();
        OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
        if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
        } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
            append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
        } else if (isAllocatableValue(b)) {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
        } else {
            append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
        }
    }

    /**
     * This method emits the compare against memory instruction, and may reorder the operands. It
     * returns true if it did so.
     *
     * @param b the right operand of the comparison
     * @return true if the left and right operands were switched, false otherwise
     */
    private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
        OperandSize size;
        switch (cmpKind) {
            case BYTE:
                size = OperandSize.BYTE;
                break;
            case WORD:
                size = OperandSize.WORD;
                break;
            case DWORD:
                size = OperandSize.DWORD;
                break;
            case QWORD:
                size = OperandSize.QWORD;
                break;
            case SINGLE:
                // floating-point compares use UCOMIS directly; operands are never reordered
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PS, asAllocatable(a), b, state));
                return false;
            case DOUBLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PD, asAllocatable(a), b, state));
                return false;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
        }

        if (isJavaConstant(a)) {
            return emitCompareMemoryConOp(size, asConstantValue(a), b, state);
        } else {
            return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
        }
    }

    /**
     * Emits a compare of constant {@code a} against memory operand {@code b}. Null constants and
     * 32-bit-encodable values are emitted in memory-immediate form, which places the memory
     * operand on the left — hence those paths return {@code true} (operands switched). A
     * constant that does not fit in 32 bits falls back to the register-memory form.
     *
     * @return true if the left and right operands were switched, false otherwise
     */
    protected boolean emitCompareMemoryConOp(OperandSize size, ConstantValue a, AMD64AddressValue b, LIRFrameState state) {
        if (JavaConstant.isNull(a.getConstant())) {
            append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, 0, state));
            return true;
        } else if (a.getConstant() instanceof VMConstant && size == DWORD) {
            VMConstant vc = (VMConstant) a.getConstant();
            append(new AMD64BinaryConsumer.MemoryVMConstOp(CMP.getMIOpcode(size, false), b, vc, state));
            return true;
        } else {
            long value = a.getJavaConstant().asLong();
            if (NumUtil.is32bit(value)) {
                append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
                return true;
            } else {
                return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
            }
        }
    }

    /**
     * Emits a register-memory CMP with {@code a} on the left; operands are never switched, so
     * this always returns {@code false}.
     */
    private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
        AMD64RMOp op = CMP.getRMOpcode(size);
        append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
        return false;
    }

    /**
     * This method emits the compare instruction, and may reorder the operands. It returns true if
     * it did so.
     *
     * @param a the left operand of the comparison
     * @param b the right operand of the comparison
     * @return true if the left and right operands were switched, false otherwise
     */
    private boolean emitCompare(PlatformKind cmpKind, Value a, Value b) {
        Variable left;
        Value right;
        boolean mirrored;
        if (LIRValueUtil.isVariable(b)) {
            // prefer keeping an existing variable on the left to avoid an extra load
            left = load(b);
            right = loadNonConst(a);
            mirrored = true;
        } else {
            left = load(a);
            right = loadNonConst(b);
            mirrored = false;
        }
        ((AMD64ArithmeticLIRGeneratorTool) arithmeticLIRGen).emitCompareOp((AMD64Kind) cmpKind, left, right);
        return mirrored;
    }

    /**
     * Emits a memory barrier for the given barrier bits, reduced to the barriers the target
     * architecture actually requires; nothing is emitted on uniprocessor targets or when no
     * barrier is required.
     */
    @Override
    public void emitMembar(int barriers) {
        int necessaryBarriers = target().arch.requiredBarriers(barriers);
        if (target().isMP && necessaryBarriers != 0) {
            append(new MembarOp(necessaryBarriers));
        }
    }

    public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args, int numberOfFloatingPointArguments);

    /**
     * Emits a direct foreign call. A far call is used when the call target offset may not fit in
     * a 32-bit displacement and position-independent code is not being generated.
     * NOTE(review): with PIC the near form is presumably always reachable (e.g. via a stub) —
     * confirm against the PIC call resolution machinery.
     */
    @Override
    protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
        long maxOffset = linkage.getMaxCallTargetOffset();
        if (maxOffset != (int) maxOffset && !GeneratePIC.getValue()) {
            append(new AMD64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
        } else {
            append(new AMD64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
        }
    }

    /** Emits a byte-order reversal of {@code input} into a fresh variable of the same kind. */
    @Override
    public Variable emitByteSwap(Value input) {
        Variable result = newVariable(LIRKind.combine(input));
        append(new AMD64ByteSwapOp(result, input));
        return result;
    }

    /**
     * Emits an array-equality comparison of {@code array1} and {@code array2} over
     * {@code length} elements of the given kind, producing an int (DWORD) result.
     */
    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind, result, array1, array2, asAllocatable(length)));
        return result;
    }

    /**
     * Emits a method return, first moving {@code input} (if any) into the calling convention's
     * result register for the given kind.
     */
    @Override
    public void emitReturn(JavaKind kind, Value input) {
        AllocatableValue operand = Value.ILLEGAL;
        if (input != null) {
            operand = resultOperandFor(kind, input.getValueKind());
            emitMove(operand, input);
        }
        append(new ReturnOp(operand));
    }

    /** Factory hook allowing subclasses to substitute a specialized strategy-switch op. */
    protected StrategySwitchOp createStrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Variable key, AllocatableValue temp) {
        return new StrategySwitchOp(strategy, keyTargets, defaultTarget, key, temp);
    }

    @Override
    public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
        // a temp is needed for loading object constants
        boolean needsTemp = !LIRKind.isValue(key);
        append(createStrategySwitchOp(strategy, keyTargets, defaultTarget, key, needsTemp ? newVariable(key.getValueKind()) : Value.ILLEGAL));
    }

    /**
     * Emits a jump-table switch for the dense key range starting at {@code lowKey}; the two
     * fresh variables serve as scratch values for the table dispatch.
     */
    @Override
    protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
        append(new TableSwitchOp(lowKey, defaultTarget, targets, key, newVariable(LIRKind.value(target().arch.getWordKind())), newVariable(key.getValueKind())));
    }

    /** Emits a spin-loop hint (PAUSE). */
    @Override
    public void emitPause() {
        append(new AMD64PauseOp());
    }

    @Override
    public SaveRegistersOp createZapRegisters(Register[] zappedRegisters, JavaConstant[] zapValues) {
        return new AMD64ZapRegistersOp(zappedRegisters, zapValues);
    }

    @Override
    public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
        return new AMD64ZapStackOp(zappedStack, zapValues);
    }
}