1 /* 2 * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 */ 23 24 package org.graalvm.compiler.core.amd64; 25 26 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD; 27 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND; 28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.OR; 29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB; 30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR; 31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX; 32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB; 33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD; 34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.DWORD; 35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.QWORD; 36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.SD; 37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.SS; 38 39 import org.graalvm.compiler.asm.NumUtil; 40 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp; 41 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp; 42 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RRMOp; 43 import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize; 44 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; 45 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AVXOp; 46 import org.graalvm.compiler.core.common.LIRKind; 47 import org.graalvm.compiler.core.common.calc.Condition; 48 import org.graalvm.compiler.core.gen.NodeLIRBuilder; 49 import org.graalvm.compiler.core.gen.NodeMatchRules; 50 import org.graalvm.compiler.core.match.ComplexMatchResult; 51 import org.graalvm.compiler.core.match.MatchRule; 52 import org.graalvm.compiler.debug.Debug; 53 import org.graalvm.compiler.debug.GraalError; 54 import org.graalvm.compiler.lir.LIRFrameState; 55 
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.gen.LIRGeneratorTool;
import org.graalvm.compiler.nodes.ConstantNode;
import org.graalvm.compiler.nodes.DeoptimizingNode;
import org.graalvm.compiler.nodes.IfNode;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.CompareNode;
import org.graalvm.compiler.nodes.calc.FloatConvertNode;
import org.graalvm.compiler.nodes.calc.LeftShiftNode;
import org.graalvm.compiler.nodes.calc.NarrowNode;
import org.graalvm.compiler.nodes.calc.ReinterpretNode;
import org.graalvm.compiler.nodes.calc.SignExtendNode;
import org.graalvm.compiler.nodes.calc.UnsignedRightShiftNode;
import org.graalvm.compiler.nodes.calc.ZeroExtendNode;
import org.graalvm.compiler.nodes.memory.Access;
import org.graalvm.compiler.nodes.memory.WriteNode;
import org.graalvm.compiler.nodes.util.GraphUtil;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.Value;

/**
 * AMD64-specific covering match rules. Each {@code @MatchRule} method recognizes a small
 * subgraph (typically an operation whose input is a {@code Read} or {@code FloatingRead})
 * and emits a single memory-operand AMD64 LIR instruction for it instead of a separate
 * load followed by a register-register operation. A rule returning {@code null} declines
 * the match and falls back to the default lowering.
 */
public class AMD64NodeMatchRules extends NodeMatchRules {

    public AMD64NodeMatchRules(LIRGeneratorTool gen) {
        super(gen);
    }

    /**
     * Returns the deoptimization state attached to {@code access}, or {@code null} if the
     * access cannot deoptimize (i.e. is not a {@link DeoptimizingNode}).
     */
    protected LIRFrameState getState(Access access) {
        if (access instanceof DeoptimizingNode) {
            return state((DeoptimizingNode) access);
        }
        return null;
    }

    /** Platform kind of the value produced by the given memory access. */
    protected AMD64Kind getMemoryKind(Access access) {
        return (AMD64Kind) gen.getLIRKind(access.asNode().stamp()).getPlatformKind();
    }

    /**
     * Maps the access's {@link AMD64Kind} to the assembler {@link OperandSize} used when
     * encoding a memory-operand instruction for it.
     *
     * @throws GraalError for kinds with no direct operand size (e.g. vector kinds)
     */
    protected OperandSize getMemorySize(Access access) {
        switch (getMemoryKind(access)) {
            case BYTE:
                return OperandSize.BYTE;
            case WORD:
                return OperandSize.WORD;
            case DWORD:
                return OperandSize.DWORD;
            case QWORD:
                return OperandSize.QWORD;
            case SINGLE:
                return OperandSize.SS;
            case DOUBLE:
                return OperandSize.SD;
            default:
                throw GraalError.shouldNotReachHere("unsupported memory access type " + getMemoryKind(access));
        }
    }

    /**
     * Builds a compare-and-branch that uses the memory access directly as one compare
     * operand. Returns {@code null} (declining the match) when the constant operand cannot
     * be encoded: a long constant outside the imm32 range, or any constant compared
     * against a floating-point memory kind.
     */
    protected ComplexMatchResult emitCompareBranchMemory(IfNode ifNode, CompareNode compare, ValueNode value, Access access) {
        Condition cond = compare.condition();
        AMD64Kind kind = getMemoryKind(access);

        if (value.isConstant()) {
            JavaConstant constant = value.asJavaConstant();
            if (constant != null && kind == AMD64Kind.QWORD && !constant.getJavaKind().isObject() && !NumUtil.isInt(constant.asLong())) {
                // Only imm32 as long
                return null;
            }
            if (kind.isXMM()) {
                Debug.log("Skipping constant compares for float kinds");
                return null;
            }
        }

        // emitCompareBranchMemory expects the memory on the right, so mirror the condition if
        // that's not true. It might be mirrored again when the actual compare is emitted but
        // that's ok.
        Condition finalCondition = GraphUtil.unproxify(compare.getX()) == access ? cond.mirror() : cond;
        return new ComplexMatchResult() {
            @Override
            public Value evaluate(NodeLIRBuilder builder) {
                LabelRef trueLabel = getLIRBlock(ifNode.trueSuccessor());
                LabelRef falseLabel = getLIRBlock(ifNode.falseSuccessor());
                boolean unorderedIsTrue = compare.unorderedIsTrue();
                double trueLabelProbability = ifNode.probability(ifNode.trueSuccessor());
                Value other = operand(value);
                AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
                getLIRGeneratorTool().emitCompareBranchMemory(kind, other, address, getState(access), finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability);
                return null;
            }
        };
    }

    /**
     * Emits a TEST instruction with a memory operand followed by a branch for an
     * {@code IntegerTest} under an {@code If}. IntegerTest is true when
     * {@code (value & memory) == 0}, which is exactly ZF after TEST, hence the EQ branch.
     * Declines ({@code null}) long constants that do not fit in imm32.
     */
    private ComplexMatchResult emitIntegerTestBranchMemory(IfNode x, ValueNode value, Access access) {
        LabelRef trueLabel = getLIRBlock(x.trueSuccessor());
        LabelRef falseLabel = getLIRBlock(x.falseSuccessor());
        double trueLabelProbability = x.probability(x.trueSuccessor());
        AMD64Kind kind = getMemoryKind(access);
        OperandSize size = kind == AMD64Kind.QWORD ? QWORD : DWORD;
        if (value.isConstant()) {
            JavaConstant constant = value.asJavaConstant();
            if (constant != null && kind == AMD64Kind.QWORD && !NumUtil.isInt(constant.asLong())) {
                // Only imm32 as long
                return null;
            }
            return builder -> {
                AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
                gen.append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.TEST, size, address, (int) constant.asLong(), getState(access)));
                gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
                return null;
            };
        } else {
            return builder -> {
                AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
                gen.append(new AMD64BinaryConsumer.MemoryRMOp(AMD64RMOp.TEST, size, gen.asAllocatable(operand(value)), address, getState(access)));
                gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
                return null;
            };
        }
    }

    /**
     * Emits a conversion instruction ({@code op}) reading its source operand directly from
     * memory, producing a value of the given {@code kind}.
     */
    protected ComplexMatchResult emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, Access access) {
        return builder -> {
            AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
            LIRFrameState state = getState(access);
            return getArithmeticLIRGenerator().emitConvertMemoryOp(kind, op, size, address, state);
        };
    }

    /**
     * Folds a memory read into a sign-extension using MOVSX/MOVSXB/MOVSXD. Extensions to
     * {@code <=} 32 bits target DWORD (narrower values are held in 32-bit registers);
     * wider ones target QWORD. Declines no-op extensions ({@code fromBits == toBits} or
     * 32->32).
     */
    private ComplexMatchResult emitSignExtendMemory(Access access, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        AMD64Kind kind = null;
        AMD64RMOp op;
        OperandSize size;
        if (fromBits == toBits) {
            return null;
        } else if (toBits > 32) {
            kind = AMD64Kind.QWORD;
            size = OperandSize.QWORD;
            // sign extend to 64 bits
            switch (fromBits) {
                case 8:
                    op = MOVSXB;
                    break;
                case 16:
                    op = MOVSX;
                    break;
                case 32:
                    op = MOVSXD;
                    break;
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        } else {
            kind = AMD64Kind.DWORD;
            size = OperandSize.DWORD;
            // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
            switch (fromBits) {
                case 8:
                    op = MOVSXB;
                    break;
                case 16:
                    op = MOVSX;
                    break;
                case 32:
                    return null;
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        }
        // Defensive check: on all paths reaching here, kind and op have been assigned.
        if (kind != null && op != null) {
            return emitConvertMemoryOp(kind, op, size, access);
        }
        return null;
    }

    /**
     * A reinterpret of a memory value needs no instruction at all: simply load the bytes
     * with the target LIRKind.
     */
    private Value emitReinterpretMemory(LIRKind to, Access access) {
        AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
        LIRFrameState state = getState(access);
        return getArithmeticLIRGenerator().emitLoad(to, address, state);
    }

    @MatchRule("(If (IntegerTest Read=access value))")
    @MatchRule("(If (IntegerTest FloatingRead=access value))")
    public ComplexMatchResult integerTestBranchMemory(IfNode root, Access access, ValueNode value) {
        return emitIntegerTestBranchMemory(root, value, access);
    }

    @MatchRule("(If (IntegerEquals=compare value Read=access))")
    @MatchRule("(If (IntegerLessThan=compare value Read=access))")
    @MatchRule("(If (IntegerBelow=compare value Read=access))")
    @MatchRule("(If (IntegerEquals=compare value FloatingRead=access))")
    @MatchRule("(If (IntegerLessThan=compare value FloatingRead=access))")
    @MatchRule("(If (IntegerBelow=compare value FloatingRead=access))")
    @MatchRule("(If (FloatEquals=compare value Read=access))")
    @MatchRule("(If (FloatEquals=compare value FloatingRead=access))")
    @MatchRule("(If (FloatLessThan=compare value Read=access))")
    @MatchRule("(If (FloatLessThan=compare value FloatingRead=access))")
    @MatchRule("(If (PointerEquals=compare value Read=access))")
    @MatchRule("(If (PointerEquals=compare value FloatingRead=access))")
    @MatchRule("(If (ObjectEquals=compare value Read=access))")
    @MatchRule("(If (ObjectEquals=compare value FloatingRead=access))")
    public ComplexMatchResult ifCompareMemory(IfNode root, CompareNode compare, ValueNode value, Access access) {
        return emitCompareBranchMemory(root, compare, value, access);
    }

    /**
     * (x << c1) | (x >>> c2) is a left rotate when the two constant shift amounts sum to
     * the operand width, i.e. their sum masked by the shift-amount mask is zero.
     */
    @MatchRule("(Or (LeftShift=lshift value Constant) (UnsignedRightShift=rshift value Constant))")
    public ComplexMatchResult rotateLeftConstant(LeftShiftNode lshift, UnsignedRightShiftNode rshift) {
        if ((lshift.getShiftAmountMask() & (lshift.getY().asJavaConstant().asInt() + rshift.getY().asJavaConstant().asInt())) == 0) {
            return builder -> getArithmeticLIRGenerator().emitRol(operand(lshift.getX()), operand(lshift.getY()));
        }
        return null;
    }

    /**
     * (x << (delta - s)) | (x >>> s) is a right rotate by s when delta is 0 or 32.
     * NOTE(review): only 0/32 are accepted here, which looks 32-bit specific — whether
     * 64-bit rotates are intentionally excluded is not visible from this file; confirm.
     */
    @MatchRule("(Or (LeftShift value (Sub Constant=delta shiftAmount)) (UnsignedRightShift value shiftAmount))")
    public ComplexMatchResult rotateRightVariable(ValueNode value, ConstantNode delta, ValueNode shiftAmount) {
        if (delta.asJavaConstant().asLong() == 0 || delta.asJavaConstant().asLong() == 32) {
            return builder -> getArithmeticLIRGenerator().emitRor(operand(value), operand(shiftAmount));
        }
        return null;
    }

    /** Mirror image of {@link #rotateRightVariable}: matches a variable left rotate. */
    @MatchRule("(Or (LeftShift value shiftAmount) (UnsignedRightShift value (Sub Constant=delta shiftAmount)))")
    public ComplexMatchResult rotateLeftVariable(ValueNode value, ValueNode shiftAmount, ConstantNode delta) {
        if (delta.asJavaConstant().asLong() == 0 || delta.asJavaConstant().asLong() == 32) {
            return builder -> getArithmeticLIRGenerator().emitRol(operand(value), operand(shiftAmount));
        }
        return null;
    }

    /** Emits {@code op} with {@code value} in a register and the access as memory operand. */
    private ComplexMatchResult binaryRead(AMD64RMOp op, OperandSize size, ValueNode value, Access access) {
        return builder -> getArithmeticLIRGenerator().emitBinaryMemory(op, size, getLIRGeneratorTool().asAllocatable(operand(value)), (AMD64AddressValue) operand(access.getAddress()),
                        getState(access));
    }

    /** AVX (three-operand) variant of {@link #binaryRead(AMD64RMOp, OperandSize, ValueNode, Access)}. */
    private ComplexMatchResult binaryRead(AMD64RRMOp op, OperandSize size, ValueNode value, Access access) {
        return builder -> getArithmeticLIRGenerator().emitBinaryMemory(op, size, getLIRGeneratorTool().asAllocatable(operand(value)), (AMD64AddressValue) operand(access.getAddress()),
                        getState(access));
    }

    @MatchRule("(Add value Read=access)")
    @MatchRule("(Add value FloatingRead=access)")
    public ComplexMatchResult addMemory(ValueNode value, Access access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            // Float/double add: prefer the AVX encoding when the CPU supports it.
            TargetDescription target = getLIRGeneratorTool().target();
            boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
            if (isAvx) {
                return binaryRead(AVXOp.ADD, size, value, access);
            } else {
                return binaryRead(SSEOp.ADD, size, value, access);
            }
        } else {
            return binaryRead(ADD.getRMOpcode(size), size, value, access);
        }
    }

    @MatchRule("(Sub value Read=access)")
    @MatchRule("(Sub value FloatingRead=access)")
    public ComplexMatchResult subMemory(ValueNode value, Access access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            // Float/double subtract: prefer the AVX encoding when the CPU supports it.
            TargetDescription target = getLIRGeneratorTool().target();
            boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
            if (isAvx) {
                return binaryRead(AVXOp.SUB, size, value, access);
            } else {
                return binaryRead(SSEOp.SUB, size, value, access);
            }
        } else {
            return binaryRead(SUB.getRMOpcode(size), size, value, access);
        }
    }

    @MatchRule("(Mul value Read=access)")
    @MatchRule("(Mul value FloatingRead=access)")
    public ComplexMatchResult mulMemory(ValueNode value, Access access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            // Float/double multiply: prefer the AVX encoding when the CPU supports it.
            TargetDescription target = getLIRGeneratorTool().target();
            boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
            if (isAvx) {
                return binaryRead(AVXOp.MUL, size, value, access);
            } else {
                return binaryRead(SSEOp.MUL, size, value, access);
            }
        } else {
            return binaryRead(AMD64RMOp.IMUL, size, value, access);
        }
    }

    @MatchRule("(And value Read=access)")
    @MatchRule("(And value FloatingRead=access)")
    public ComplexMatchResult andMemory(ValueNode value, Access access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            // No memory-folded bitwise ops for XMM kinds; decline the match.
            return null;
        } else {
            return binaryRead(AND.getRMOpcode(size), size, value, access);
        }
    }

    @MatchRule("(Or value Read=access)")
    @MatchRule("(Or value FloatingRead=access)")
    public ComplexMatchResult orMemory(ValueNode value, Access access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            // No memory-folded bitwise ops for XMM kinds; decline the match.
            return null;
        } else {
            return binaryRead(OR.getRMOpcode(size), size, value, access);
        }
    }

    @MatchRule("(Xor value Read=access)")
    @MatchRule("(Xor value FloatingRead=access)")
    public ComplexMatchResult xorMemory(ValueNode value, Access access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            // No memory-folded bitwise ops for XMM kinds; decline the match.
            return null;
        } else {
            return binaryRead(XOR.getRMOpcode(size), size, value, access);
        }
    }

    /**
     * A store of a narrowed value needs no explicit narrow: the store already writes only
     * the low-order bytes dictated by the write's kind.
     */
    @MatchRule("(Write object Narrow=narrow)")
    public ComplexMatchResult writeNarrow(WriteNode root, NarrowNode narrow) {
        return builder -> {
            LIRKind writeKind = getLIRGeneratorTool().getLIRKind(root.value().stamp());
            getArithmeticLIRGenerator().emitStore(writeKind, operand(root.getAddress()), operand(narrow.getValue()), state(root));
            return null;
        };
    }

    @MatchRule("(SignExtend Read=access)")
    @MatchRule("(SignExtend FloatingRead=access)")
    public ComplexMatchResult signExtend(SignExtendNode root, Access access) {
        return emitSignExtendMemory(access, root.getInputBits(), root.getResultBits());
    }

    @MatchRule("(ZeroExtend Read=access)")
    @MatchRule("(ZeroExtend FloatingRead=access)")
    public ComplexMatchResult zeroExtend(ZeroExtendNode root, Access access) {
        AMD64Kind memoryKind = getMemoryKind(access);
        return builder -> getArithmeticLIRGenerator().emitZeroExtendMemory(memoryKind, root.getResultBits(), (AMD64AddressValue) operand(access.getAddress()), getState(access));
    }

    /**
     * Folds a memory read into a float/int conversion using the SSE convert instructions
     * (CVTSD2SS, CVTTSD2SI, CVTSS2SD, CVTTSS2SI, CVTSI2SD, CVTSI2SS) with a memory source.
     */
    @MatchRule("(FloatConvert Read=access)")
    @MatchRule("(FloatConvert FloatingRead=access)")
    public ComplexMatchResult floatConvert(FloatConvertNode root, Access access) {
        switch (root.getFloatConvert()) {
            case D2F:
                return emitConvertMemoryOp(AMD64Kind.SINGLE, SSEOp.CVTSD2SS, SD, access);
            case D2I:
                return emitConvertMemoryOp(AMD64Kind.DWORD, SSEOp.CVTTSD2SI, DWORD, access);
            case D2L:
                return emitConvertMemoryOp(AMD64Kind.QWORD, SSEOp.CVTTSD2SI, QWORD, access);
            case F2D:
                return emitConvertMemoryOp(AMD64Kind.DOUBLE, SSEOp.CVTSS2SD, SS, access);
            case F2I:
                return emitConvertMemoryOp(AMD64Kind.DWORD, SSEOp.CVTTSS2SI, DWORD, access);
            case F2L:
                return emitConvertMemoryOp(AMD64Kind.QWORD, SSEOp.CVTTSS2SI, QWORD, access);
            case I2D:
                return emitConvertMemoryOp(AMD64Kind.DOUBLE, SSEOp.CVTSI2SD, DWORD, access);
            case I2F:
                return emitConvertMemoryOp(AMD64Kind.SINGLE, SSEOp.CVTSI2SS, DWORD, access);
            case L2D:
                return emitConvertMemoryOp(AMD64Kind.DOUBLE, SSEOp.CVTSI2SD, QWORD, access);
            case L2F:
                return emitConvertMemoryOp(AMD64Kind.SINGLE, SSEOp.CVTSI2SS, QWORD, access);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    @MatchRule("(Reinterpret Read=access)")
    @MatchRule("(Reinterpret FloatingRead=access)")
    public ComplexMatchResult reinterpret(ReinterpretNode root, Access access) {
        return builder -> {
            LIRKind kind = getLIRGeneratorTool().getLIRKind(root.stamp());
            return emitReinterpretMemory(kind, access);
        };

    }

    /**
     * A store of a reinterpreted value needs no conversion: store the source bits with the
     * source's platform kind.
     */
    @MatchRule("(Write object Reinterpret=reinterpret)")
    public ComplexMatchResult writeReinterpret(WriteNode root, ReinterpretNode reinterpret) {
        return builder -> {
            LIRKind kind = getLIRGeneratorTool().getLIRKind(reinterpret.getValue().stamp());
            AllocatableValue value = getLIRGeneratorTool().asAllocatable(operand(reinterpret.getValue()));

            AMD64AddressValue address = (AMD64AddressValue) operand(root.getAddress());
            getArithmeticLIRGenerator().emitStore((AMD64Kind) kind.getPlatformKind(), address, value, getState(root));
            return null;
        };
    }

    @Override
    public AMD64LIRGenerator getLIRGeneratorTool() {
        return (AMD64LIRGenerator) gen;
    }

    protected AMD64ArithmeticLIRGenerator getArithmeticLIRGenerator() {
        return (AMD64ArithmeticLIRGenerator) getLIRGeneratorTool().getArithmetic();
    }
}