1 /* 2 * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 */


package org.graalvm.compiler.core.amd64;

import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.OR;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSS;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSS;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSD;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSS;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS;

import org.graalvm.compiler.asm.amd64.AMD64Assembler;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.gen.NodeLIRBuilder;
import org.graalvm.compiler.core.gen.NodeMatchRules;
import org.graalvm.compiler.core.match.ComplexMatchResult;
import org.graalvm.compiler.core.match.MatchRule;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.gen.LIRGeneratorTool;
import org.graalvm.compiler.nodes.ConstantNode;
import org.graalvm.compiler.nodes.DeoptimizingNode;
import org.graalvm.compiler.nodes.IfNode;
import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.CompareNode;
import org.graalvm.compiler.nodes.calc.FloatConvertNode;
import org.graalvm.compiler.nodes.calc.LeftShiftNode;
import org.graalvm.compiler.nodes.calc.NarrowNode;
import org.graalvm.compiler.nodes.calc.ReinterpretNode;
import org.graalvm.compiler.nodes.calc.SignExtendNode;
import org.graalvm.compiler.nodes.calc.UnsignedRightShiftNode;
import org.graalvm.compiler.nodes.calc.ZeroExtendNode;
import org.graalvm.compiler.nodes.java.LogicCompareAndSwapNode;
import org.graalvm.compiler.nodes.java.ValueCompareAndSwapNode;
import org.graalvm.compiler.nodes.memory.Access;
import org.graalvm.compiler.nodes.memory.LIRLowerableAccess;
import org.graalvm.compiler.nodes.memory.WriteNode;
import org.graalvm.compiler.nodes.util.GraphUtil;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;

/**
 * AMD64-specific {@link MatchRule} definitions. Each rule recognizes a small subtree of graph
 * nodes (typically an operation fused with a memory read, or a compare feeding a branch) and
 * returns a {@link ComplexMatchResult} that emits a single combined LIR instruction instead of
 * the default node-by-node lowering. A rule method returning {@code null} means "no match";
 * the default lowering is used instead.
 */
public class AMD64NodeMatchRules extends NodeMatchRules {

    public AMD64NodeMatchRules(LIRGeneratorTool gen) {
        super(gen);
    }

    /**
     * Returns the deoptimization state for {@code access} if it is a {@link DeoptimizingNode},
     * otherwise {@code null} (no implicit-exception state needed).
     */
    protected LIRFrameState getState(Access access) {
        if (access instanceof DeoptimizingNode) {
            return state((DeoptimizingNode) access);
        }
        return null;
    }

    /** Platform kind (BYTE/WORD/DWORD/QWORD/SINGLE/DOUBLE/...) of the accessed memory. */
    protected AMD64Kind getMemoryKind(LIRLowerableAccess access) {
        return (AMD64Kind) getLirKind(access).getPlatformKind();
    }

    protected LIRKind getLirKind(LIRLowerableAccess access) {
        return gen.getLIRKind(access.getAccessStamp());
    }

    /**
     * Maps the access's platform kind to the assembler {@link OperandSize} used to encode the
     * memory operand.
     *
     * @throws GraalError for kinds with no direct operand size (e.g. vector kinds)
     */
    protected OperandSize getMemorySize(LIRLowerableAccess access) {
        switch (getMemoryKind(access)) {
            case BYTE:
                return OperandSize.BYTE;
            case WORD:
                return OperandSize.WORD;
            case DWORD:
                return OperandSize.DWORD;
            case QWORD:
                return OperandSize.QWORD;
            case SINGLE:
                return OperandSize.SS;
            case DOUBLE:
                return OperandSize.SD;
            default:
                throw GraalError.shouldNotReachHere("unsupported memory access type " + getMemoryKind(access));
        }
    }

    /**
     * Builds a compare-and-branch that uses the memory location {@code access} directly as one
     * operand, avoiding a separate load. Returns {@code null} when the constant operand cannot
     * be encoded (long constants outside imm32 range, or any floating-point constant).
     */
    protected ComplexMatchResult emitCompareBranchMemory(IfNode ifNode, CompareNode compare, ValueNode value, LIRLowerableAccess access) {
        Condition cond = compare.condition().asCondition();
        AMD64Kind kind = getMemoryKind(access);
        boolean matchedAsConstant = false; // For assertion checking

        if (value.isConstant()) {
            JavaConstant constant = value.asJavaConstant();
            if (constant != null) {
                if (kind == AMD64Kind.QWORD && !constant.getJavaKind().isObject() && !NumUtil.isInt(constant.asLong())) {
                    // Only imm32 as long
                    return null;
                }
                // A QWORD that can be encoded as int can be embedded as a constant
                matchedAsConstant = kind == AMD64Kind.QWORD && !constant.getJavaKind().isObject() && NumUtil.isInt(constant.asLong());
            }
            if (kind == AMD64Kind.DWORD) {
                // Any DWORD value should be embeddable as a constant
                matchedAsConstant = true;
            }
            if (kind.isXMM()) {
                ifNode.getDebug().log("Skipping constant compares for float kinds");
                return null;
            }
        }
        boolean matchedAsConstantFinal = matchedAsConstant;

        /*
         * emitCompareBranchMemory expects the memory on the right, so mirror the condition if
         * that's not true. It might be mirrored again when the actual compare is emitted but
         * that's ok.
         */
        Condition finalCondition = GraphUtil.unproxify(compare.getX()) == access ? cond.mirror() : cond;
        return new ComplexMatchResult() {
            @Override
            public Value evaluate(NodeLIRBuilder builder) {
                LabelRef trueLabel = getLIRBlock(ifNode.trueSuccessor());
                LabelRef falseLabel = getLIRBlock(ifNode.falseSuccessor());
                boolean unorderedIsTrue = compare.unorderedIsTrue();
                double trueLabelProbability = ifNode.probability(ifNode.trueSuccessor());
                Value other = operand(value);
                /*
                 * Check that patterns which were matched as a constant actually end up seeing a
                 * constant in the LIR.
                 */
                assert !matchedAsConstantFinal || !LIRValueUtil.isVariable(other) : "expected constant value " + value;
                AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
                getLIRGeneratorTool().emitCompareBranchMemory(kind, other, address, getState(access), finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability);
                return null;
            }
        };
    }

    /**
     * Emits a TEST instruction against memory followed by a branch, fusing an
     * {@code IntegerTest} compare with its memory operand. Returns {@code null} when a long
     * constant does not fit in imm32.
     */
    private ComplexMatchResult emitIntegerTestBranchMemory(IfNode x, ValueNode value, LIRLowerableAccess access) {
        LabelRef trueLabel = getLIRBlock(x.trueSuccessor());
        LabelRef falseLabel = getLIRBlock(x.falseSuccessor());
        double trueLabelProbability = x.probability(x.trueSuccessor());
        AMD64Kind kind = getMemoryKind(access);
        OperandSize size = kind == AMD64Kind.QWORD ? QWORD : DWORD;
        if (value.isConstant()) {
            JavaConstant constant = value.asJavaConstant();
            if (constant != null && kind == AMD64Kind.QWORD && !NumUtil.isInt(constant.asLong())) {
                // Only imm32 as long
                return null;
            }
            // NOTE(review): if value.isConstant() but asJavaConstant() is null, the lambda
            // below would NPE on constant.asLong() — presumably that combination cannot reach
            // here; confirm against the matcher's constant handling.
            return builder -> {
                AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
                gen.append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.TEST, size, address, (int) constant.asLong(), getState(access)));
                gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
                return null;
            };
        } else {
            return builder -> {
                AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
                gen.append(new AMD64BinaryConsumer.MemoryRMOp(AMD64RMOp.TEST, size, gen.asAllocatable(operand(value)), address, getState(access)));
                gen.append(new BranchOp(Condition.EQ, trueLabel, falseLabel, trueLabelProbability));
                return null;
            };
        }
    }

    /**
     * Emits a conversion op whose source operand is the memory location itself (load folded
     * into the convert). {@code addressKind}, when non-null, overrides the address's value
     * kind (used when the read is narrowed before conversion).
     */
    protected ComplexMatchResult emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, Access access, ValueKind<?> addressKind) {
        return builder -> {
            AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
            LIRFrameState state = getState(access);
            if (addressKind != null) {
                address = address.withKind(addressKind);
            }
            return getArithmeticLIRGenerator().emitConvertMemoryOp(kind, op, size, address, state);
        };
    }

    protected ComplexMatchResult emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, Access access) {
        return emitConvertMemoryOp(kind, op, size, access, null);
    }

    /**
     * Folds a memory read into a sign extension (MOVSX/MOVSXB/MOVSXD). Returns {@code null}
     * when no instruction applies (no-op extension, or 32->32 where the value is already held
     * as a 32-bit register value).
     */
    private ComplexMatchResult emitSignExtendMemory(Access access, int fromBits, int toBits, ValueKind<?> addressKind) {
        assert fromBits <= toBits && toBits <= 64;
        AMD64Kind kind = null;
        AMD64RMOp op;
        OperandSize size;
        if (fromBits == toBits) {
            return null;
        } else if (toBits > 32) {
            kind = AMD64Kind.QWORD;
            size = OperandSize.QWORD;
            // sign extend to 64 bits
            switch (fromBits) {
                case 8:
                    op = MOVSXB;
                    break;
                case 16:
                    op = MOVSX;
                    break;
                case 32:
                    op = MOVSXD;
                    break;
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        } else {
            kind = AMD64Kind.DWORD;
            size = OperandSize.DWORD;
            // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
            switch (fromBits) {
                case 8:
                    op = MOVSXB;
                    break;
                case 16:
                    op = MOVSX;
                    break;
                case 32:
                    return null;
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        }
        if (kind != null && op != null) {
            return emitConvertMemoryOp(kind, op, size, access, addressKind);
        }
        return null;
    }

    /** Reinterprets the bits at {@code access} by simply loading them with the target kind. */
    private Value emitReinterpretMemory(LIRKind to, Access access) {
        AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
        LIRFrameState state = getState(access);
        return getArithmeticLIRGenerator().emitLoad(to, address, state);
    }

    /** Whether the target CPU supports {@code feature}. */
    private boolean supports(CPUFeature feature) {
        return ((AMD64) getLIRGeneratorTool().target().arch).getFeatures().contains(feature);
    }

    // BMI1 ANDN: dst = ~a & b
    @MatchRule("(And (Not a) b)")
    public ComplexMatchResult logicalAndNot(ValueNode a, ValueNode b) {
        if (!supports(CPUFeature.BMI1)) {
            return null;
        }
        return builder -> getArithmeticLIRGenerator().emitLogicalAndNot(operand(a), operand(b));
    }

    // BMI1 BLSI: a & -a isolates the lowest set bit
    @MatchRule("(And a (Negate a))")
    public ComplexMatchResult lowestSetIsolatedBit(ValueNode a) {
        if (!supports(CPUFeature.BMI1)) {
            return null;
        }
        return builder -> getArithmeticLIRGenerator().emitLowestSetIsolatedBit(operand(a));
    }

    // BMI1 BLSMSK: a ^ (a - 1); the subtraction appears as (Add a -1) after canonicalization
    @MatchRule("(Xor a (Add a b))")
    public ComplexMatchResult getMaskUpToLowestSetBit(ValueNode a, ValueNode b) {
        if (!supports(CPUFeature.BMI1)) {
            return null;
        }

        // Make sure that the pattern matches a subtraction by one.
        if (!b.isJavaConstant()) {
            return null;
        }

        JavaConstant bCst = b.asJavaConstant();
        long bValue;
        if (bCst.getJavaKind() == JavaKind.Int) {
            bValue = bCst.asInt();
        } else if (bCst.getJavaKind() == JavaKind.Long) {
            bValue = bCst.asLong();
        } else {
            return null;
        }

        if (bValue == -1) {
            return builder -> getArithmeticLIRGenerator().emitGetMaskUpToLowestSetBit(operand(a));
        } else {
            return null;
        }
    }

    // BMI1 BLSR: a & (a - 1) clears the lowest set bit
    @MatchRule("(And a (Add a b))")
    public ComplexMatchResult resetLowestSetBit(ValueNode a, ValueNode b) {
        if (!supports(CPUFeature.BMI1)) {
            return null;
        }
        // Make sure that the pattern matches a subtraction by one.
        if (!b.isJavaConstant()) {
            return null;
        }

        JavaConstant bCst = b.asJavaConstant();
        long bValue;
        if (bCst.getJavaKind() == JavaKind.Int) {
            bValue = bCst.asInt();
        } else if (bCst.getJavaKind() == JavaKind.Long) {
            bValue = bCst.asLong();
        } else {
            return null;
        }

        if (bValue == -1) {
            return builder -> getArithmeticLIRGenerator().emitResetLowestSetBit(operand(a));
        } else {
            return null;
        }
    }

    @MatchRule("(If (IntegerTest Read=access value))")
    @MatchRule("(If (IntegerTest FloatingRead=access value))")
    public ComplexMatchResult integerTestBranchMemory(IfNode root, LIRLowerableAccess access, ValueNode value) {
        return emitIntegerTestBranchMemory(root, value, access);
    }

    @MatchRule("(If (IntegerEquals=compare value Read=access))")
    @MatchRule("(If (IntegerLessThan=compare value Read=access))")
    @MatchRule("(If (IntegerBelow=compare value Read=access))")
    @MatchRule("(If (IntegerEquals=compare value FloatingRead=access))")
    @MatchRule("(If (IntegerLessThan=compare value FloatingRead=access))")
    @MatchRule("(If (IntegerBelow=compare value FloatingRead=access))")
    @MatchRule("(If (FloatEquals=compare value Read=access))")
    @MatchRule("(If (FloatEquals=compare value FloatingRead=access))")
    @MatchRule("(If (FloatLessThan=compare value Read=access))")
    @MatchRule("(If (FloatLessThan=compare value FloatingRead=access))")
    @MatchRule("(If (PointerEquals=compare value Read=access))")
    @MatchRule("(If (PointerEquals=compare value FloatingRead=access))")
    @MatchRule("(If (ObjectEquals=compare value Read=access))")
    @MatchRule("(If (ObjectEquals=compare value FloatingRead=access))")
    public ComplexMatchResult ifCompareMemory(IfNode root, CompareNode compare, ValueNode value, LIRLowerableAccess access) {
        return emitCompareBranchMemory(root, compare, value, access);
    }

    /**
     * Fuses a value CAS whose result is only compared against its expected value into a single
     * CMPXCHG + branch, avoiding materializing the CAS result.
     */
    @MatchRule("(If (ObjectEquals=compare value ValueCompareAndSwap=cas))")
    @MatchRule("(If (PointerEquals=compare value ValueCompareAndSwap=cas))")
    @MatchRule("(If (FloatEquals=compare value ValueCompareAndSwap=cas))")
    @MatchRule("(If (IntegerEquals=compare value ValueCompareAndSwap=cas))")
    public ComplexMatchResult ifCompareValueCas(IfNode root, CompareNode compare, ValueNode value, ValueCompareAndSwapNode cas) {
        assert compare.condition() == CanonicalCondition.EQ;
        if (value == cas.getExpectedValue() && cas.hasExactlyOneUsage()) {
            return builder -> {
                LIRKind kind = getLirKind(cas);
                LabelRef trueLabel = getLIRBlock(root.trueSuccessor());
                LabelRef falseLabel = getLIRBlock(root.falseSuccessor());
                double trueLabelProbability = root.probability(root.trueSuccessor());
                Value expectedValue = operand(cas.getExpectedValue());
                Value newValue = operand(cas.getNewValue());
                AMD64AddressValue address = (AMD64AddressValue) operand(cas.getAddress());
                getLIRGeneratorTool().emitCompareAndSwapBranch(kind, address, expectedValue, newValue, Condition.EQ, trueLabel, falseLabel, trueLabelProbability);
                return null;
            };
        }
        return null;
    }

    /**
     * Fuses a logic CAS compared against the constant 0 or 1 into a CMPXCHG + branch; the
     * constant selects whether CAS success takes the true branch.
     */
    @MatchRule("(If (ObjectEquals=compare value LogicCompareAndSwap=cas))")
    @MatchRule("(If (PointerEquals=compare value LogicCompareAndSwap=cas))")
    @MatchRule("(If (FloatEquals=compare value LogicCompareAndSwap=cas))")
    @MatchRule("(If (IntegerEquals=compare value LogicCompareAndSwap=cas))")
    public ComplexMatchResult ifCompareLogicCas(IfNode root, CompareNode compare, ValueNode value, LogicCompareAndSwapNode cas) {
        JavaConstant constant = value.asJavaConstant();
        assert compare.condition() == CanonicalCondition.EQ;
        if (constant != null && cas.hasExactlyOneUsage()) {
            long constantValue = constant.asLong();
            boolean successIsTrue;
            if (constantValue == 0) {
                successIsTrue = false;
            } else if (constantValue == 1) {
                successIsTrue = true;
            } else {
                return null;
            }
            return builder -> {
                LIRKind kind = getLirKind(cas);
                LabelRef trueLabel = getLIRBlock(root.trueSuccessor());
                LabelRef falseLabel = getLIRBlock(root.falseSuccessor());
                double trueLabelProbability = root.probability(root.trueSuccessor());
                Value expectedValue = operand(cas.getExpectedValue());
                Value newValue = operand(cas.getNewValue());
                AMD64AddressValue address = (AMD64AddressValue) operand(cas.getAddress());
                Condition condition = successIsTrue ? Condition.EQ : Condition.NE;
                getLIRGeneratorTool().emitCompareAndSwapBranch(kind, address, expectedValue, newValue, condition, trueLabel, falseLabel, trueLabelProbability);
                return null;
            };
        }
        return null;
    }

    @MatchRule("(If (ObjectEquals=compare value FloatingRead=access))")
    public ComplexMatchResult ifLogicCas(IfNode root, CompareNode compare, ValueNode value, LIRLowerableAccess access) {
        return emitCompareBranchMemory(root, compare, value, access);
    }

    // (x << c1) | (x >>> c2) where c1 + c2 == 0 mod width is a rotate left
    @MatchRule("(Or (LeftShift=lshift value Constant) (UnsignedRightShift=rshift value Constant))")
    public ComplexMatchResult rotateLeftConstant(LeftShiftNode lshift, UnsignedRightShiftNode rshift) {
        if ((lshift.getShiftAmountMask() & (lshift.getY().asJavaConstant().asInt() + rshift.getY().asJavaConstant().asInt())) == 0) {
            return builder -> getArithmeticLIRGenerator().emitRol(operand(lshift.getX()), operand(lshift.getY()));
        }
        return null;
    }

    // (x << (delta - s)) | (x >>> s) with delta == 0 or 32 is a variable rotate right
    @MatchRule("(Or (LeftShift value (Sub Constant=delta shiftAmount)) (UnsignedRightShift value shiftAmount))")
    public ComplexMatchResult rotateRightVariable(ValueNode value, ConstantNode delta, ValueNode shiftAmount) {
        if (delta.asJavaConstant().asLong() == 0 || delta.asJavaConstant().asLong() == 32) {
            return builder -> getArithmeticLIRGenerator().emitRor(operand(value), operand(shiftAmount));
        }
        return null;
    }

    // (x << s) | (x >>> (delta - s)) with delta == 0 or 32 is a variable rotate left
    @MatchRule("(Or (LeftShift value shiftAmount) (UnsignedRightShift value (Sub Constant=delta shiftAmount)))")
    public ComplexMatchResult rotateLeftVariable(ValueNode value, ValueNode shiftAmount, ConstantNode delta) {
        if (delta.asJavaConstant().asLong() == 0 || delta.asJavaConstant().asLong() == 32) {
            return builder -> getArithmeticLIRGenerator().emitRol(operand(value), operand(shiftAmount));
        }
        return null;
    }

    /** Emits {@code op value, [address]} — a binary op with the read folded into the memory operand. */
    private ComplexMatchResult binaryRead(AMD64RMOp op, OperandSize size, ValueNode value, LIRLowerableAccess access) {
        return builder -> getArithmeticLIRGenerator().emitBinaryMemory(op, size, getLIRGeneratorTool().asAllocatable(operand(value)), (AMD64AddressValue) operand(access.getAddress()),
                        getState(access));
    }

    /** VEX-encoded variant of {@link #binaryRead}; only scalar float sizes are valid. */
    private ComplexMatchResult binaryRead(AMD64Assembler.VexRVMOp op, OperandSize size, ValueNode value, LIRLowerableAccess access) {
        assert size == SS || size == SD;
        return builder -> getArithmeticLIRGenerator().emitBinaryMemory(op, size, getLIRGeneratorTool().asAllocatable(operand(value)), (AMD64AddressValue) operand(access.getAddress()),
                        getState(access));
    }

    @MatchRule("(Add value Read=access)")
    @MatchRule("(Add value FloatingRead=access)")
    public ComplexMatchResult addMemory(ValueNode value, LIRLowerableAccess access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            if (getArithmeticLIRGenerator().supportAVX()) {
                return binaryRead(size == SS ? VADDSS : VADDSD, size, value, access);
            } else {
                return binaryRead(SSEOp.ADD, size, value, access);
            }
        } else {
            return binaryRead(ADD.getRMOpcode(size), size, value, access);
        }
    }

    @MatchRule("(Sub value Read=access)")
    @MatchRule("(Sub value FloatingRead=access)")
    public ComplexMatchResult subMemory(ValueNode value, LIRLowerableAccess access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            if (getArithmeticLIRGenerator().supportAVX()) {
                return binaryRead(size == SS ? VSUBSS : VSUBSD, size, value, access);
            } else {
                return binaryRead(SSEOp.SUB, size, value, access);
            }
        } else {
            return binaryRead(SUB.getRMOpcode(size), size, value, access);
        }
    }

    @MatchRule("(Mul value Read=access)")
    @MatchRule("(Mul value FloatingRead=access)")
    public ComplexMatchResult mulMemory(ValueNode value, LIRLowerableAccess access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            if (getArithmeticLIRGenerator().supportAVX()) {
                return binaryRead(size == SS ? VMULSS : VMULSD, size, value, access);
            } else {
                return binaryRead(SSEOp.MUL, size, value, access);
            }
        } else {
            return binaryRead(AMD64RMOp.IMUL, size, value, access);
        }
    }

    // Logical ops below have no scalar-float memory form, so XMM kinds are not matched.

    @MatchRule("(And value Read=access)")
    @MatchRule("(And value FloatingRead=access)")
    public ComplexMatchResult andMemory(ValueNode value, LIRLowerableAccess access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            return null;
        } else {
            return binaryRead(AND.getRMOpcode(size), size, value, access);
        }
    }

    @MatchRule("(Or value Read=access)")
    @MatchRule("(Or value FloatingRead=access)")
    public ComplexMatchResult orMemory(ValueNode value, LIRLowerableAccess access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            return null;
        } else {
            return binaryRead(OR.getRMOpcode(size), size, value, access);
        }
    }

    @MatchRule("(Xor value Read=access)")
    @MatchRule("(Xor value FloatingRead=access)")
    public ComplexMatchResult xorMemory(ValueNode value, LIRLowerableAccess access) {
        OperandSize size = getMemorySize(access);
        if (size.isXmmType()) {
            return null;
        } else {
            return binaryRead(XOR.getRMOpcode(size), size, value, access);
        }
    }

    // Store a narrowed value directly: the store width already truncates, so the Narrow is free.
    @MatchRule("(Write object Narrow=narrow)")
    public ComplexMatchResult writeNarrow(WriteNode root, NarrowNode narrow) {
        return builder -> {
            LIRKind writeKind = getLIRGeneratorTool().getLIRKind(root.value().stamp(NodeView.DEFAULT));
            getArithmeticLIRGenerator().emitStore(writeKind, operand(root.getAddress()), operand(narrow.getValue()), state(root));
            return null;
        };
    }

    @MatchRule("(SignExtend Read=access)")
    @MatchRule("(SignExtend FloatingRead=access)")
    public ComplexMatchResult signExtend(SignExtendNode root, LIRLowerableAccess access) {
        return emitSignExtendMemory(access, root.getInputBits(), root.getResultBits(), null);
    }

    @MatchRule("(ZeroExtend Read=access)")
    @MatchRule("(ZeroExtend FloatingRead=access)")
    public ComplexMatchResult zeroExtend(ZeroExtendNode root, LIRLowerableAccess access) {
        AMD64Kind memoryKind = getMemoryKind(access);
        return builder -> getArithmeticLIRGenerator().emitZeroExtendMemory(memoryKind, root.getResultBits(), (AMD64AddressValue) operand(access.getAddress()), getState(access));
    }

    // A narrow of a read is lowered as a smaller (zero-extending) load with the narrowed kind.
    @MatchRule("(Narrow Read=access)")
    @MatchRule("(Narrow FloatingRead=access)")
    public ComplexMatchResult narrowRead(NarrowNode root, LIRLowerableAccess access) {
        return new ComplexMatchResult() {
            @Override
            public Value evaluate(NodeLIRBuilder builder) {
                AMD64AddressValue address = (AMD64AddressValue) operand(access.getAddress());
                LIRKind addressKind = LIRKind.combineDerived(getLIRGeneratorTool().getLIRKind(root.asNode().stamp(NodeView.DEFAULT)),
                                address.getBase(), address.getIndex());
                AMD64AddressValue newAddress = address.withKind(addressKind);
                LIRKind readKind = getLIRGeneratorTool().getLIRKind(root.stamp(NodeView.DEFAULT));
                return getArithmeticLIRGenerator().emitZeroExtendMemory((AMD64Kind) readKind.getPlatformKind(),
                                root.getResultBits(), newAddress, getState(access));
            }
        };
    }

    // SignExtend(Narrow(read)) collapses to one sign-extending load of the narrowed width.
    @MatchRule("(SignExtend (Narrow=narrow Read=access))")
    @MatchRule("(SignExtend (Narrow=narrow FloatingRead=access))")
    public ComplexMatchResult signExtendNarrowRead(SignExtendNode root, NarrowNode narrow, LIRLowerableAccess access) {
        LIRKind kind = getLIRGeneratorTool().getLIRKind(narrow.stamp(NodeView.DEFAULT));
        return emitSignExtendMemory(access, narrow.getResultBits(), root.getResultBits(), kind);
    }

    // Fold a read into the corresponding SSE convert instruction.
    @MatchRule("(FloatConvert Read=access)")
    @MatchRule("(FloatConvert FloatingRead=access)")
    public ComplexMatchResult floatConvert(FloatConvertNode root, LIRLowerableAccess access) {
        switch (root.getFloatConvert()) {
            case D2F:
                return emitConvertMemoryOp(AMD64Kind.SINGLE, SSEOp.CVTSD2SS, SD, access);
            case D2I:
                return emitConvertMemoryOp(AMD64Kind.DWORD, SSEOp.CVTTSD2SI, DWORD, access);
            case D2L:
                return emitConvertMemoryOp(AMD64Kind.QWORD, SSEOp.CVTTSD2SI, QWORD, access);
            case F2D:
                return emitConvertMemoryOp(AMD64Kind.DOUBLE, SSEOp.CVTSS2SD, SS, access);
            case F2I:
                return emitConvertMemoryOp(AMD64Kind.DWORD, SSEOp.CVTTSS2SI, DWORD, access);
            case F2L:
                return emitConvertMemoryOp(AMD64Kind.QWORD, SSEOp.CVTTSS2SI, QWORD, access);
            case I2D:
                return emitConvertMemoryOp(AMD64Kind.DOUBLE, SSEOp.CVTSI2SD, DWORD, access);
            case I2F:
                return emitConvertMemoryOp(AMD64Kind.SINGLE, SSEOp.CVTSI2SS, DWORD, access);
            case L2D:
                return emitConvertMemoryOp(AMD64Kind.DOUBLE, SSEOp.CVTSI2SD, QWORD, access);
            case L2F:
                return emitConvertMemoryOp(AMD64Kind.SINGLE, SSEOp.CVTSI2SS, QWORD, access);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    @MatchRule("(Reinterpret Read=access)")
    @MatchRule("(Reinterpret FloatingRead=access)")
    public ComplexMatchResult reinterpret(ReinterpretNode root, LIRLowerableAccess access) {
        return builder -> {
            LIRKind kind = getLIRGeneratorTool().getLIRKind(root.stamp(NodeView.DEFAULT));
            return emitReinterpretMemory(kind, access);
        };

    }

    // Store a reinterpreted value directly with the source value's platform kind.
    @MatchRule("(Write object Reinterpret=reinterpret)")
    public ComplexMatchResult writeReinterpret(WriteNode root, ReinterpretNode reinterpret) {
        return builder -> {
            LIRKind kind = getLIRGeneratorTool().getLIRKind(reinterpret.getValue().stamp(NodeView.DEFAULT));
            AllocatableValue value = getLIRGeneratorTool().asAllocatable(operand(reinterpret.getValue()));

            AMD64AddressValue address = (AMD64AddressValue) operand(root.getAddress());
            getArithmeticLIRGenerator().emitStore((AMD64Kind) kind.getPlatformKind(), address, value, getState(root));
            return null;
        };
    }

    @Override
    public AMD64LIRGenerator getLIRGeneratorTool() {
        return (AMD64LIRGenerator) gen;
    }

    protected AMD64ArithmeticLIRGenerator getArithmeticLIRGenerator() {
        return (AMD64ArithmeticLIRGenerator) getLIRGeneratorTool().getArithmetic();
    }
}