/*
 * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 */ 23 24 25 26 package org.graalvm.compiler.core.amd64; 27 28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD; 29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND; 30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP; 31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.OR; 32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB; 33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR; 34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NEG; 35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NOT; 36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSF; 37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSR; 38 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.LZCNT; 39 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOV; 40 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSD; 41 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSS; 42 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX; 43 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB; 44 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD; 45 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZX; 46 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZXB; 47 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.POPCNT; 48 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TEST; 49 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TESTB; 50 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TZCNT; 51 import static 
org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL; 52 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR; 53 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR; 54 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL; 55 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR; 56 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSD; 57 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSS; 58 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSD; 59 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSS; 60 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSD; 61 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSS; 62 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPD; 63 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPS; 64 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSD; 65 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSS; 66 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPD; 67 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPS; 68 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE; 69 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD; 70 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD; 71 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS; 72 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD; 73 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD; 74 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS; 75 import static 
org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.WORD; 76 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC; 77 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue; 78 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant; 79 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue; 80 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant; 81 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM; 82 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM; 83 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp.BinaryIntrinsicOpcode.POW; 84 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.COS; 85 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.EXP; 86 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG; 87 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG10; 88 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.SIN; 89 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.TAN; 90 91 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic; 92 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp; 93 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp; 94 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp; 95 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp; 96 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp; 97 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift; 98 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; 99 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRMOp; 100 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp; 101 import 
org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp; 102 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize; 103 import org.graalvm.compiler.asm.amd64.AVXKind; 104 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; 105 import org.graalvm.compiler.core.common.LIRKind; 106 import org.graalvm.compiler.core.common.NumUtil; 107 import org.graalvm.compiler.core.common.calc.FloatConvert; 108 import org.graalvm.compiler.debug.GraalError; 109 import org.graalvm.compiler.lir.ConstantValue; 110 import org.graalvm.compiler.lir.LIRFrameState; 111 import org.graalvm.compiler.lir.LIRValueUtil; 112 import org.graalvm.compiler.lir.Variable; 113 import org.graalvm.compiler.lir.amd64.AMD64AddressValue; 114 import org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FPDivRemOp; 115 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool; 116 import org.graalvm.compiler.lir.amd64.AMD64Binary; 117 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer; 118 import org.graalvm.compiler.lir.amd64.AMD64ClearRegisterOp; 119 import org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp; 120 import org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp; 121 import org.graalvm.compiler.lir.amd64.AMD64Move; 122 import org.graalvm.compiler.lir.amd64.AMD64MulDivOp; 123 import org.graalvm.compiler.lir.amd64.AMD64ShiftOp; 124 import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp; 125 import org.graalvm.compiler.lir.amd64.AMD64Unary; 126 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary; 127 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary.AVXBinaryOp; 128 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorUnary; 129 import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator; 130 import org.graalvm.compiler.lir.gen.LIRGenerator; 131 132 import jdk.vm.ci.amd64.AMD64; 133 import jdk.vm.ci.amd64.AMD64.CPUFeature; 134 import jdk.vm.ci.amd64.AMD64Kind; 135 import jdk.vm.ci.code.CodeUtil; 136 import jdk.vm.ci.code.Register; 137 
import jdk.vm.ci.code.RegisterValue; 138 import jdk.vm.ci.code.TargetDescription; 139 import jdk.vm.ci.meta.AllocatableValue; 140 import jdk.vm.ci.meta.Constant; 141 import jdk.vm.ci.meta.JavaConstant; 142 import jdk.vm.ci.meta.JavaKind; 143 import jdk.vm.ci.meta.PlatformKind; 144 import jdk.vm.ci.meta.VMConstant; 145 import jdk.vm.ci.meta.Value; 146 import jdk.vm.ci.meta.ValueKind; 147 148 /** 149 * This class implements the AMD64 specific portion of the LIR generator. 150 */ 151 public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AMD64ArithmeticLIRGeneratorTool { 152 153 private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD)); 154 155 public AMD64ArithmeticLIRGenerator(AllocatableValue nullRegisterValue, Maths maths) { 156 this.nullRegisterValue = nullRegisterValue; 157 this.maths = maths == null ? new Maths() { 158 } : maths; 159 } 160 161 private final AllocatableValue nullRegisterValue; 162 private final Maths maths; 163 164 /** 165 * Interface for emitting LIR for selected {@link Math} routines. A {@code null} return value 166 * for any method in this interface means the caller must emit the LIR itself. 
167 */ 168 public interface Maths { 169 170 @SuppressWarnings("unused") 171 default Variable emitLog(LIRGenerator gen, Value input, boolean base10) { 172 return null; 173 } 174 175 @SuppressWarnings("unused") 176 default Variable emitCos(LIRGenerator gen, Value input) { 177 return null; 178 } 179 180 @SuppressWarnings("unused") 181 default Variable emitSin(LIRGenerator gen, Value input) { 182 return null; 183 } 184 185 @SuppressWarnings("unused") 186 default Variable emitTan(LIRGenerator gen, Value input) { 187 return null; 188 } 189 } 190 191 @Override 192 public Variable emitNegate(Value inputVal) { 193 AllocatableValue input = asAllocatable(inputVal); 194 Variable result = getLIRGen().newVariable(LIRKind.combine(input)); 195 boolean isAvx = supportAVX(); 196 switch ((AMD64Kind) input.getPlatformKind()) { 197 case DWORD: 198 getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input)); 199 break; 200 case QWORD: 201 getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input)); 202 break; 203 case SINGLE: 204 JavaConstant floatMask = JavaConstant.forFloat(Float.intBitsToFloat(0x80000000)); 205 if (isAvx) { 206 getLIRGen().append(new AVXBinaryOp(VXORPS, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(floatMask)))); 207 } else { 208 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, floatMask, 16)); 209 } 210 break; 211 case DOUBLE: 212 JavaConstant doubleMask = JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L)); 213 if (isAvx) { 214 getLIRGen().append(new AVXBinaryOp(VXORPD, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(doubleMask)))); 215 } else { 216 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, doubleMask, 16)); 217 } 218 break; 219 default: 220 throw GraalError.shouldNotReachHere(input.getPlatformKind().toString()); 221 } 222 return result; 223 } 224 225 @Override 226 public 
Variable emitNot(Value inputVal) { 227 AllocatableValue input = asAllocatable(inputVal); 228 Variable result = getLIRGen().newVariable(LIRKind.combine(input)); 229 switch ((AMD64Kind) input.getPlatformKind()) { 230 case DWORD: 231 getLIRGen().append(new AMD64Unary.MOp(NOT, DWORD, result, input)); 232 break; 233 case QWORD: 234 getLIRGen().append(new AMD64Unary.MOp(NOT, QWORD, result, input)); 235 break; 236 default: 237 throw GraalError.shouldNotReachHere(); 238 } 239 return result; 240 } 241 242 private Variable emitBinary(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, Value a, Value b, boolean setFlags) { 243 if (isJavaConstant(b)) { 244 return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(a), asConstantValue(b), setFlags); 245 } else if (commutative && isJavaConstant(a)) { 246 return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(b), asConstantValue(a), setFlags); 247 } else { 248 return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, asAllocatable(a), asAllocatable(b)); 249 } 250 } 251 252 private Variable emitBinary(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, Value a, Value b) { 253 if (isJavaConstant(b)) { 254 return emitBinaryConst(resultKind, op, size, asAllocatable(a), asJavaConstant(b)); 255 } else if (commutative && isJavaConstant(a)) { 256 return emitBinaryConst(resultKind, op, size, asAllocatable(b), asJavaConstant(a)); 257 } else { 258 return emitBinaryVar(resultKind, op, size, commutative, asAllocatable(a), asAllocatable(b)); 259 } 260 } 261 262 private Variable emitBinaryConst(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, AllocatableValue a, ConstantValue b, boolean setFlags) { 263 long value = b.getJavaConstant().asLong(); 264 if (NumUtil.isInt(value)) { 265 Variable result = getLIRGen().newVariable(resultKind); 266 int constant = (int) value; 267 268 if (!setFlags) { 269 AMD64MOp mop = 
getMOp(op, constant); 270 if (mop != null) { 271 getLIRGen().append(new AMD64Unary.MOp(mop, size, result, a)); 272 return result; 273 } 274 } 275 276 getLIRGen().append(new AMD64Binary.ConstOp(op, size, result, a, constant)); 277 return result; 278 } else { 279 return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, a, asAllocatable(b)); 280 } 281 } 282 283 private static AMD64MOp getMOp(AMD64BinaryArithmetic op, int constant) { 284 if (constant == 1) { 285 if (op.equals(AMD64BinaryArithmetic.ADD)) { 286 return AMD64MOp.INC; 287 } 288 if (op.equals(AMD64BinaryArithmetic.SUB)) { 289 return AMD64MOp.DEC; 290 } 291 } else if (constant == -1) { 292 if (op.equals(AMD64BinaryArithmetic.ADD)) { 293 return AMD64MOp.DEC; 294 } 295 if (op.equals(AMD64BinaryArithmetic.SUB)) { 296 return AMD64MOp.INC; 297 } 298 } 299 return null; 300 } 301 302 private Variable emitBinaryConst(LIRKind resultKind, AMD64RMOp op, OperandSize size, AllocatableValue a, JavaConstant b) { 303 Variable result = getLIRGen().newVariable(resultKind); 304 getLIRGen().append(new AMD64Binary.DataTwoOp(op, size, result, a, b)); 305 return result; 306 } 307 308 private Variable emitBinaryVar(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, AllocatableValue a, AllocatableValue b) { 309 Variable result = getLIRGen().newVariable(resultKind); 310 if (commutative) { 311 getLIRGen().append(new AMD64Binary.CommutativeTwoOp(op, size, result, a, b)); 312 } else { 313 getLIRGen().append(new AMD64Binary.TwoOp(op, size, result, a, b)); 314 } 315 return result; 316 } 317 318 @Override 319 protected boolean isNumericInteger(PlatformKind kind) { 320 return ((AMD64Kind) kind).isInteger(); 321 } 322 323 private Variable emitBaseOffsetLea(LIRKind resultKind, Value base, int offset, OperandSize size) { 324 Variable result = getLIRGen().newVariable(resultKind); 325 AMD64AddressValue address = new AMD64AddressValue(resultKind, asAllocatable(base), offset); 326 getLIRGen().append(new 
AMD64Move.LeaOp(result, address, size)); 327 return result; 328 } 329 330 @Override 331 public Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) { 332 boolean isAvx = supportAVX(); 333 switch ((AMD64Kind) a.getPlatformKind()) { 334 case DWORD: 335 if (isJavaConstant(b) && !setFlags) { 336 long displacement = asJavaConstant(b).asLong(); 337 if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) { 338 return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.DWORD); 339 } 340 } 341 return emitBinary(resultKind, ADD, DWORD, true, a, b, setFlags); 342 case QWORD: 343 if (isJavaConstant(b) && !setFlags) { 344 long displacement = asJavaConstant(b).asLong(); 345 if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) { 346 return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.QWORD); 347 } 348 } 349 return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags); 350 case SINGLE: 351 if (isAvx) { 352 return emitBinary(resultKind, VADDSS, a, b); 353 } else { 354 return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b); 355 } 356 case DOUBLE: 357 if (isAvx) { 358 return emitBinary(resultKind, VADDSD, a, b); 359 } else { 360 return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b); 361 } 362 default: 363 throw GraalError.shouldNotReachHere(); 364 } 365 } 366 367 @Override 368 public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) { 369 boolean isAvx = supportAVX(); 370 switch ((AMD64Kind) a.getPlatformKind()) { 371 case DWORD: 372 return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags); 373 case QWORD: 374 return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags); 375 case SINGLE: 376 if (isAvx) { 377 return emitBinary(resultKind, VSUBSS, a, b); 378 } else { 379 return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b); 380 } 381 case DOUBLE: 382 if (isAvx) { 383 return emitBinary(resultKind, VSUBSD, a, b); 384 } else { 385 return 
emitBinary(resultKind, SSEOp.SUB, SD, false, a, b); 386 } 387 default: 388 throw GraalError.shouldNotReachHere(); 389 } 390 } 391 392 private Variable emitIMULConst(OperandSize size, AllocatableValue a, ConstantValue b) { 393 long value = b.getJavaConstant().asLong(); 394 if (NumUtil.isInt(value)) { 395 int imm = (int) value; 396 AMD64RMIOp op; 397 if (NumUtil.isByte(imm)) { 398 op = AMD64RMIOp.IMUL_SX; 399 } else { 400 op = AMD64RMIOp.IMUL; 401 } 402 403 Variable ret = getLIRGen().newVariable(LIRKind.combine(a, b)); 404 getLIRGen().append(new AMD64Binary.RMIOp(op, size, ret, a, imm)); 405 return ret; 406 } else { 407 return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, a, asAllocatable(b)); 408 } 409 } 410 411 private Variable emitIMUL(OperandSize size, Value a, Value b) { 412 if (isJavaConstant(b)) { 413 return emitIMULConst(size, asAllocatable(a), asConstantValue(b)); 414 } else if (isJavaConstant(a)) { 415 return emitIMULConst(size, asAllocatable(b), asConstantValue(a)); 416 } else { 417 return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, asAllocatable(a), asAllocatable(b)); 418 } 419 } 420 421 @Override 422 public Variable emitMul(Value a, Value b, boolean setFlags) { 423 boolean isAvx = supportAVX(); 424 LIRKind resultKind = LIRKind.combine(a, b); 425 switch ((AMD64Kind) a.getPlatformKind()) { 426 case DWORD: 427 return emitIMUL(DWORD, a, b); 428 case QWORD: 429 return emitIMUL(QWORD, a, b); 430 case SINGLE: 431 if (isAvx) { 432 return emitBinary(resultKind, VMULSS, a, b); 433 } else { 434 return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b); 435 } 436 case DOUBLE: 437 if (isAvx) { 438 return emitBinary(resultKind, VMULSD, a, b); 439 } else { 440 return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b); 441 } 442 default: 443 throw GraalError.shouldNotReachHere(); 444 } 445 } 446 447 private RegisterValue moveToReg(Register reg, Value v) { 448 RegisterValue ret = reg.asValue(v.getValueKind()); 449 
getLIRGen().emitMove(ret, v); 450 return ret; 451 } 452 453 private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, Value b) { 454 AMD64MulDivOp mulHigh = getLIRGen().append(new AMD64MulDivOp(opcode, size, LIRKind.combine(a, b), moveToReg(AMD64.rax, a), asAllocatable(b))); 455 return getLIRGen().emitMove(mulHigh.getHighResult()); 456 } 457 458 @Override 459 public Value emitMulHigh(Value a, Value b) { 460 switch ((AMD64Kind) a.getPlatformKind()) { 461 case DWORD: 462 return emitMulHigh(AMD64MOp.IMUL, DWORD, a, b); 463 case QWORD: 464 return emitMulHigh(AMD64MOp.IMUL, QWORD, a, b); 465 default: 466 throw GraalError.shouldNotReachHere(); 467 } 468 } 469 470 @Override 471 public Value emitUMulHigh(Value a, Value b) { 472 switch ((AMD64Kind) a.getPlatformKind()) { 473 case DWORD: 474 return emitMulHigh(AMD64MOp.MUL, DWORD, a, b); 475 case QWORD: 476 return emitMulHigh(AMD64MOp.MUL, QWORD, a, b); 477 default: 478 throw GraalError.shouldNotReachHere(); 479 } 480 } 481 482 public Value emitBinaryMemory(AMD64RMOp op, OperandSize size, AllocatableValue a, AMD64AddressValue location, LIRFrameState state) { 483 Variable result = getLIRGen().newVariable(LIRKind.combine(a)); 484 getLIRGen().append(new AMD64Binary.MemoryTwoOp(op, size, result, a, location, state)); 485 return result; 486 } 487 488 protected Value emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AMD64AddressValue address, LIRFrameState state) { 489 Variable result = getLIRGen().newVariable(LIRKind.value(kind)); 490 getLIRGen().append(new AMD64Unary.MemoryOp(op, size, result, address, state)); 491 return result; 492 } 493 494 protected Value emitZeroExtendMemory(AMD64Kind memoryKind, int resultBits, AMD64AddressValue address, LIRFrameState state) { 495 // Issue a zero extending load of the proper bit size and set the result to 496 // the proper kind. 497 Variable result = getLIRGen().newVariable(LIRKind.value(resultBits <= 32 ? 
AMD64Kind.DWORD : AMD64Kind.QWORD)); 498 switch (memoryKind) { 499 case BYTE: 500 getLIRGen().append(new AMD64Unary.MemoryOp(MOVZXB, DWORD, result, address, state)); 501 break; 502 case WORD: 503 getLIRGen().append(new AMD64Unary.MemoryOp(MOVZX, DWORD, result, address, state)); 504 break; 505 case DWORD: 506 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, address, state)); 507 break; 508 case QWORD: 509 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, address, state)); 510 break; 511 default: 512 throw GraalError.shouldNotReachHere(); 513 } 514 return result; 515 } 516 517 private AMD64MulDivOp emitIDIV(OperandSize size, Value a, Value b, LIRFrameState state) { 518 LIRKind kind = LIRKind.combine(a, b); 519 520 AMD64SignExtendOp sx = getLIRGen().append(new AMD64SignExtendOp(size, kind, moveToReg(AMD64.rax, a))); 521 return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.IDIV, size, kind, sx.getHighResult(), sx.getLowResult(), asAllocatable(b), state)); 522 } 523 524 private AMD64MulDivOp emitDIV(OperandSize size, Value a, Value b, LIRFrameState state) { 525 LIRKind kind = LIRKind.combine(a, b); 526 527 RegisterValue rax = moveToReg(AMD64.rax, a); 528 RegisterValue rdx = AMD64.rdx.asValue(kind); 529 getLIRGen().append(new AMD64ClearRegisterOp(size, rdx)); 530 return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.DIV, size, kind, rdx, rax, asAllocatable(b), state)); 531 } 532 533 public Value[] emitSignedDivRem(Value a, Value b, LIRFrameState state) { 534 AMD64MulDivOp op; 535 switch ((AMD64Kind) a.getPlatformKind()) { 536 case DWORD: 537 op = emitIDIV(DWORD, a, b, state); 538 break; 539 case QWORD: 540 op = emitIDIV(QWORD, a, b, state); 541 break; 542 default: 543 throw GraalError.shouldNotReachHere(); 544 } 545 return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())}; 546 } 547 548 public Value[] emitUnsignedDivRem(Value a, Value b, LIRFrameState state) { 549 AMD64MulDivOp op; 550 switch 
((AMD64Kind) a.getPlatformKind()) { 551 case DWORD: 552 op = emitDIV(DWORD, a, b, state); 553 break; 554 case QWORD: 555 op = emitDIV(QWORD, a, b, state); 556 break; 557 default: 558 throw GraalError.shouldNotReachHere(); 559 } 560 return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())}; 561 } 562 563 @Override 564 public Value emitDiv(Value a, Value b, LIRFrameState state) { 565 boolean isAvx = supportAVX(); 566 LIRKind resultKind = LIRKind.combine(a, b); 567 switch ((AMD64Kind) a.getPlatformKind()) { 568 case DWORD: 569 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state); 570 return getLIRGen().emitMove(op.getQuotient()); 571 case QWORD: 572 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state); 573 return getLIRGen().emitMove(lop.getQuotient()); 574 case SINGLE: 575 if (isAvx) { 576 return emitBinary(resultKind, VDIVSS, a, b); 577 } else { 578 return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b); 579 } 580 case DOUBLE: 581 if (isAvx) { 582 return emitBinary(resultKind, VDIVSD, a, b); 583 } else { 584 return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b); 585 } 586 default: 587 throw GraalError.shouldNotReachHere(); 588 } 589 } 590 591 @Override 592 public Value emitRem(Value a, Value b, LIRFrameState state) { 593 switch ((AMD64Kind) a.getPlatformKind()) { 594 case DWORD: 595 AMD64MulDivOp op = emitIDIV(DWORD, a, b, state); 596 return getLIRGen().emitMove(op.getRemainder()); 597 case QWORD: 598 AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state); 599 return getLIRGen().emitMove(lop.getRemainder()); 600 case SINGLE: { 601 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b)); 602 getLIRGen().append(new FPDivRemOp(FREM, result, getLIRGen().load(a), getLIRGen().load(b))); 603 return result; 604 } 605 case DOUBLE: { 606 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b)); 607 getLIRGen().append(new FPDivRemOp(DREM, result, getLIRGen().load(a), getLIRGen().load(b))); 608 return result; 609 } 610 default: 
611 throw GraalError.shouldNotReachHere(); 612 } 613 } 614 615 @Override 616 public Variable emitUDiv(Value a, Value b, LIRFrameState state) { 617 AMD64MulDivOp op; 618 switch ((AMD64Kind) a.getPlatformKind()) { 619 case DWORD: 620 op = emitDIV(DWORD, a, b, state); 621 break; 622 case QWORD: 623 op = emitDIV(QWORD, a, b, state); 624 break; 625 default: 626 throw GraalError.shouldNotReachHere(); 627 } 628 return getLIRGen().emitMove(op.getQuotient()); 629 } 630 631 @Override 632 public Variable emitURem(Value a, Value b, LIRFrameState state) { 633 AMD64MulDivOp op; 634 switch ((AMD64Kind) a.getPlatformKind()) { 635 case DWORD: 636 op = emitDIV(DWORD, a, b, state); 637 break; 638 case QWORD: 639 op = emitDIV(QWORD, a, b, state); 640 break; 641 default: 642 throw GraalError.shouldNotReachHere(); 643 } 644 return getLIRGen().emitMove(op.getRemainder()); 645 } 646 647 @Override 648 public Variable emitAnd(Value a, Value b) { 649 LIRKind resultKind = LIRKind.combine(a, b); 650 switch ((AMD64Kind) a.getPlatformKind()) { 651 case DWORD: 652 return emitBinary(resultKind, AND, DWORD, true, a, b, false); 653 case QWORD: 654 return emitBinary(resultKind, AND, QWORD, true, a, b, false); 655 case SINGLE: 656 return emitBinary(resultKind, SSEOp.AND, PS, true, a, b); 657 case DOUBLE: 658 return emitBinary(resultKind, SSEOp.AND, PD, true, a, b); 659 default: 660 throw GraalError.shouldNotReachHere(); 661 } 662 } 663 664 @Override 665 public Variable emitOr(Value a, Value b) { 666 boolean isAvx = supportAVX(); 667 LIRKind resultKind = LIRKind.combine(a, b); 668 switch ((AMD64Kind) a.getPlatformKind()) { 669 case DWORD: 670 return emitBinary(resultKind, OR, DWORD, true, a, b, false); 671 case QWORD: 672 return emitBinary(resultKind, OR, QWORD, true, a, b, false); 673 case SINGLE: 674 if (isAvx) { 675 return emitBinary(resultKind, VORPS, a, b); 676 } else { 677 return emitBinary(resultKind, SSEOp.OR, PS, true, a, b); 678 } 679 case DOUBLE: 680 if (isAvx) { 681 return 
emitBinary(resultKind, VORPD, a, b); 682 } else { 683 return emitBinary(resultKind, SSEOp.OR, PD, true, a, b); 684 } 685 default: 686 throw GraalError.shouldNotReachHere(); 687 } 688 } 689 690 @Override 691 public Variable emitXor(Value a, Value b) { 692 boolean isAvx = supportAVX(); 693 LIRKind resultKind = LIRKind.combine(a, b); 694 switch ((AMD64Kind) a.getPlatformKind()) { 695 case DWORD: 696 return emitBinary(resultKind, XOR, DWORD, true, a, b, false); 697 case QWORD: 698 return emitBinary(resultKind, XOR, QWORD, true, a, b, false); 699 case SINGLE: 700 if (isAvx) { 701 return emitBinary(resultKind, VXORPS, a, b); 702 } else { 703 return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b); 704 } 705 case DOUBLE: 706 if (isAvx) { 707 return emitBinary(resultKind, VXORPD, a, b); 708 } else { 709 return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b); 710 } 711 default: 712 throw GraalError.shouldNotReachHere(); 713 } 714 } 715 716 private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) { 717 Variable result = getLIRGen().newVariable(LIRKind.combine(a, b).changeType(a.getPlatformKind())); 718 AllocatableValue input = asAllocatable(a); 719 if (isJavaConstant(b)) { 720 JavaConstant c = asJavaConstant(b); 721 if (c.asLong() == 1) { 722 getLIRGen().append(new AMD64Unary.MOp(op.m1Op, size, result, input)); 723 } else { 724 /* 725 * c is implicitly masked to 5 or 6 bits by the CPU, so casting it to (int) is 726 * always correct, even without the NumUtil.is32bit() test. 
727 */ 728 getLIRGen().append(new AMD64Binary.ConstOp(op.miOp, size, result, input, (int) c.asLong())); 729 } 730 } else { 731 getLIRGen().emitMove(RCX_I, b); 732 getLIRGen().append(new AMD64ShiftOp(op.mcOp, size, result, input, RCX_I)); 733 } 734 return result; 735 } 736 737 @Override 738 public Variable emitShl(Value a, Value b) { 739 switch ((AMD64Kind) a.getPlatformKind()) { 740 case DWORD: 741 return emitShift(SHL, DWORD, a, b); 742 case QWORD: 743 return emitShift(SHL, QWORD, a, b); 744 default: 745 throw GraalError.shouldNotReachHere(); 746 } 747 } 748 749 @Override 750 public Variable emitShr(Value a, Value b) { 751 switch ((AMD64Kind) a.getPlatformKind()) { 752 case DWORD: 753 return emitShift(SAR, DWORD, a, b); 754 case QWORD: 755 return emitShift(SAR, QWORD, a, b); 756 default: 757 throw GraalError.shouldNotReachHere(); 758 } 759 } 760 761 @Override 762 public Variable emitUShr(Value a, Value b) { 763 switch ((AMD64Kind) a.getPlatformKind()) { 764 case DWORD: 765 return emitShift(SHR, DWORD, a, b); 766 case QWORD: 767 return emitShift(SHR, QWORD, a, b); 768 default: 769 throw GraalError.shouldNotReachHere(); 770 } 771 } 772 773 public Variable emitRol(Value a, Value b) { 774 switch ((AMD64Kind) a.getPlatformKind()) { 775 case DWORD: 776 return emitShift(ROL, DWORD, a, b); 777 case QWORD: 778 return emitShift(ROL, QWORD, a, b); 779 default: 780 throw GraalError.shouldNotReachHere(); 781 } 782 } 783 784 public Variable emitRor(Value a, Value b) { 785 switch ((AMD64Kind) a.getPlatformKind()) { 786 case DWORD: 787 return emitShift(ROR, DWORD, a, b); 788 case QWORD: 789 return emitShift(ROR, QWORD, a, b); 790 default: 791 throw GraalError.shouldNotReachHere(); 792 } 793 } 794 795 private AllocatableValue emitConvertOp(LIRKind kind, AMD64RMOp op, OperandSize size, Value input) { 796 Variable result = getLIRGen().newVariable(kind); 797 getLIRGen().append(new AMD64Unary.RMOp(op, size, result, asAllocatable(input))); 798 return result; 799 } 800 801 private 
/**
 * Emits a unary conversion using an MR-form opcode, producing a fresh variable of {@code kind}.
 */
AllocatableValue emitConvertOp(LIRKind kind, AMD64MROp op, OperandSize size, Value input) {
    Variable result = getLIRGen().newVariable(kind);
    getLIRGen().append(new AMD64Unary.MROp(op, size, result, asAllocatable(input)));
    return result;
}

/**
 * Reinterprets the bits of {@code inputVal} as the platform kind of {@code to} without value
 * conversion. Only int<->float reinterpretations of matching width are supported; identical
 * kinds are returned unchanged.
 */
@Override
public Value emitReinterpret(LIRKind to, Value inputVal) {
    ValueKind<?> from = inputVal.getValueKind();
    if (to.equals(from)) {
        // Same kind: no move needed, the bits already have the right interpretation.
        return inputVal;
    }

    AllocatableValue input = asAllocatable(inputVal);
    /*
     * Conversions between integer to floating point types require moves between CPU and FPU
     * registers.
     */
    AMD64Kind fromKind = (AMD64Kind) from.getPlatformKind();
    switch ((AMD64Kind) to.getPlatformKind()) {
        case DWORD:
            switch (fromKind) {
                case SINGLE:
                    // XMM -> GP move (MR form: register operand is the destination).
                    return emitConvertOp(to, AMD64MROp.MOVD, DWORD, input);
            }
            break;
        case QWORD:
            switch (fromKind) {
                case DOUBLE:
                    return emitConvertOp(to, AMD64MROp.MOVQ, QWORD, input);
            }
            break;
        case SINGLE:
            switch (fromKind) {
                case DWORD:
                    // GP -> XMM move (RM form).
                    return emitConvertOp(to, AMD64RMOp.MOVD, DWORD, input);
            }
            break;
        case DOUBLE:
            switch (fromKind) {
                case QWORD:
                    return emitConvertOp(to, AMD64RMOp.MOVQ, QWORD, input);
            }
            break;
    }
    // Any other kind pairing (e.g. mismatched widths) is not a valid reinterpretation.
    throw GraalError.shouldNotReachHere();
}

/**
 * Emits an SSE float/int conversion for the given {@link FloatConvert} variant. The operand
 * size passed to {@link #emitConvertOp} is the size of the integer side of the conversion
 * (source for Ix/Lx, destination for x2I/x2L).
 */
@Override
public Value emitFloatConvert(FloatConvert op, Value input) {
    switch (op) {
        case D2F:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSD2SS, SD, input);
        case D2I:
            // CVTTSD2SI: truncating (round-toward-zero) conversion, per Java semantics.
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSD2SI, DWORD, input);
        case D2L:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSD2SI, QWORD, input);
        case F2D:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSS2SD, SS, input);
        case F2I:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSS2SI, DWORD, input);
        case F2L:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSS2SI, QWORD, input);
        case I2D:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, DWORD, input);
        case I2F:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, DWORD, input);
        case L2D:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, QWORD, input);
        case L2F:
            return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, QWORD, input);
        default:
            throw GraalError.shouldNotReachHere();
    }
}

/**
 * Narrows an integer value to {@code bits}. Only QWORD -> (<=32 bit) needs an actual move;
 * sub-32-bit values are already represented in 32-bit registers and returned unchanged.
 */
@Override
public Value emitNarrow(Value inputVal, int bits) {
    if (inputVal.getPlatformKind() == AMD64Kind.QWORD && bits <= 32) {
        // TODO make it possible to reinterpret Long as Int in LIR without move
        return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), AMD64RMOp.MOV, DWORD, inputVal);
    } else {
        return inputVal;
    }
}

/**
 * Sign-extends from {@code fromBits} to {@code toBits} (8/16/32 sources only). Targets above
 * 32 bits extend to a full QWORD; smaller targets extend to DWORD since sub-32-bit values are
 * internally kept in 32-bit registers.
 */
@Override
public Value emitSignExtend(Value inputVal, int fromBits, int toBits) {
    assert fromBits <= toBits && toBits <= 64;
    if (fromBits == toBits) {
        return inputVal;
    } else if (toBits > 32) {
        // sign extend to 64 bits
        switch (fromBits) {
            case 8:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXB, QWORD, inputVal);
            case 16:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSX, QWORD, inputVal);
            case 32:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXD, QWORD, inputVal);
            default:
                throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
        }
    } else {
        // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
        switch (fromBits) {
            case 8:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSXB, DWORD, inputVal);
            case 16:
                return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSX, DWORD, inputVal);
            case 32:
                // Already held as a 32-bit value; nothing to do.
                return inputVal;
            default:
                throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
        }
    }
}

/**
 * Zero-extends from {@code fromBits} to {@code toBits}. Power-of-two sources use MOVZX/MOV;
 * odd bit counts fall back to an AND with the appropriate mask. Sources wider than 32 bits
 * are masked in place with a 64-bit AND.
 */
@Override
public Value emitZeroExtend(Value inputVal, int fromBits, int toBits) {
    assert fromBits <= toBits && toBits <= 64;
    if (fromBits == toBits) {
        return inputVal;
    } else if (fromBits > 32) {
        // There is no MOVZX for >32-bit sources; mask the QWORD directly.
        assert inputVal.getPlatformKind() == AMD64Kind.QWORD;
        Variable result = getLIRGen().newVariable(LIRKind.combine(inputVal));
        long mask = CodeUtil.mask(fromBits);
        getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(QWORD), QWORD, result, asAllocatable(inputVal), JavaConstant.forLong(mask)));
        return result;
    } else {
        LIRKind resultKind = LIRKind.combine(inputVal);
        if (toBits > 32) {
            resultKind = resultKind.changeType(AMD64Kind.QWORD);
        } else {
            resultKind = resultKind.changeType(AMD64Kind.DWORD);
        }

        /*
         * Always emit DWORD operations, even if the resultKind is Long. On AMD64, all DWORD
         * operations implicitly set the upper half of the register to 0, which is what we want
         * anyway. Compared to the QWORD operations, the encoding of the DWORD operations is
         * sometimes one byte shorter.
         */
        switch (fromBits) {
            case 8:
                return emitConvertOp(resultKind, MOVZXB, DWORD, inputVal);
            case 16:
                return emitConvertOp(resultKind, MOVZX, DWORD, inputVal);
            case 32:
                return emitConvertOp(resultKind, MOV, DWORD, inputVal);
        }

        // odd bit count, fall back on manual masking
        Variable result = getLIRGen().newVariable(resultKind);
        JavaConstant mask;
        if (toBits > 32) {
            mask = JavaConstant.forLong(CodeUtil.mask(fromBits));
        } else {
            mask = JavaConstant.forInt((int) CodeUtil.mask(fromBits));
        }
        getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(DWORD), DWORD, result, asAllocatable(inputVal), mask));
        return result;
    }
}

/** Emits POPCNT; the result is always a DWORD regardless of the input width. */
@Override
public Variable emitBitCount(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    assert ((AMD64Kind) value.getPlatformKind()).isInteger();
    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64Unary.RMOp(POPCNT, QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64Unary.RMOp(POPCNT, DWORD, result, asAllocatable(value)));
    }
    return result;
}

/**
 * Emits BSF.
 *
 * NOTE(review): unlike {@link #emitBitScanReverse}, BSF is emitted with QWORD size
 * unconditionally — presumably safe because narrower inputs occupy zero-extended 64-bit
 * registers and BSF scans from the low end; confirm against callers.
 */
@Override
public Variable emitBitScanForward(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    getLIRGen().append(new AMD64Unary.RMOp(BSF, QWORD, result, asAllocatable(value)));
    return result;
}

/** Emits BSR with the operand size matching the input width. */
@Override
public Variable emitBitScanReverse(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    assert ((AMD64Kind) value.getPlatformKind()).isInteger();
    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64Unary.RMOp(BSR, QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64Unary.RMOp(BSR, DWORD, result, asAllocatable(value)));
    }
    return result;
}

/** Emits LZCNT with the operand size matching the input width. */
@Override
public Value emitCountLeadingZeros(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    assert ((AMD64Kind) value.getPlatformKind()).isInteger();
    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64Unary.RMOp(LZCNT, QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64Unary.RMOp(LZCNT, DWORD, result, asAllocatable(value)));
    }
    return result;
}

/** Emits TZCNT with the operand size matching the input width. */
@Override
public Value emitCountTrailingZeros(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
    assert ((AMD64Kind) value.getPlatformKind()).isInteger();
    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64Unary.RMOp(TZCNT, QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64Unary.RMOp(TZCNT, DWORD, result, asAllocatable(value)));
    }
    return result;
}

/** Emits BMI1 ANDN: {@code ~value1 & value2} in a single instruction. */
@Override
public Value emitLogicalAndNot(Value value1, Value value2) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value1, value2));

    if (value1.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.QWORD, result, asAllocatable(value1), asAllocatable(value2)));
    } else {
        getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.DWORD, result, asAllocatable(value1), asAllocatable(value2)));
    }
    return result;
}

/** Emits BMI1 BLSI: isolates the lowest set bit ({@code value & -value}). */
@Override
public Value emitLowestSetIsolatedBit(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value));

    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.DWORD, result, asAllocatable(value)));
    }

    return result;
}

/** Emits BMI1 BLSMSK: mask up to and including the lowest set bit ({@code value ^ (value - 1)}). */
@Override
public Value emitGetMaskUpToLowestSetBit(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value));

    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.DWORD, result, asAllocatable(value)));
    }

    return result;
}

/** Emits BMI1 BLSR: clears the lowest set bit ({@code value & (value - 1)}). */
@Override
public Value emitResetLowestSetBit(Value value) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value));

    if (value.getPlatformKind() == AMD64Kind.QWORD) {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.QWORD, result, asAllocatable(value)));
    } else {
        getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.DWORD, result, asAllocatable(value)));
    }

    return result;
}

/**
 * Computes |input| for SINGLE/DOUBLE by ANDing away the sign bit with a 16-byte-aligned
 * constant mask.
 */
@Override
public Value emitMathAbs(Value input) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(input));
    switch ((AMD64Kind) input.getPlatformKind()) {
        case SINGLE:
            // 0x7FFFFFFF clears the float sign bit; alignment 16 for the packed AND operand.
            getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PS, result, asAllocatable(input), JavaConstant.forFloat(Float.intBitsToFloat(0x7FFFFFFF)), 16));
            break;
        case DOUBLE:
            getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PD, result, asAllocatable(input), JavaConstant.forDouble(Double.longBitsToDouble(0x7FFFFFFFFFFFFFFFL)), 16));
            break;
        default:
            throw GraalError.shouldNotReachHere();
    }
    return result;
}

/** Emits SQRTSS/SQRTSD for SINGLE/DOUBLE inputs. */
@Override
public Value emitMathSqrt(Value input) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(input));
    switch ((AMD64Kind) input.getPlatformKind()) {
        case SINGLE:
            getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SS, result, asAllocatable(input)));
            break;
        case DOUBLE:
            getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SD, result, asAllocatable(input)));
            break;
        default:
            throw GraalError.shouldNotReachHere();
    }
    return result;
}

/**
 * Emits log/log10. Tries the {@code maths} helper first; falls back to the x87-based
 * intrinsic stub, which needs a QWORD spill slot for register shuffling.
 */
@Override
public Value emitMathLog(Value input, boolean base10) {
    LIRGenerator gen = getLIRGen();
    Variable result = maths.emitLog(gen, input, base10);
    if (result == null) {
        result = gen.newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), base10 ? LOG10 : LOG, result, asAllocatable(input), stackSlot));
    }
    return result;
}

/** Emits cos; same helper-then-intrinsic-stub pattern as {@link #emitMathLog}. */
@Override
public Value emitMathCos(Value input) {
    LIRGenerator gen = getLIRGen();
    Variable result = maths.emitCos(gen, input);
    if (result == null) {
        result = gen.newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), COS, result, asAllocatable(input), stackSlot));
    }
    return result;
}

/** Emits sin; same helper-then-intrinsic-stub pattern as {@link #emitMathLog}. */
@Override
public Value emitMathSin(Value input) {
    LIRGenerator gen = getLIRGen();
    Variable result = maths.emitSin(gen, input);
    if (result == null) {
        result = gen.newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), SIN, result, asAllocatable(input), stackSlot));
    }
    return result;
}
/** Emits tan; same helper-then-intrinsic-stub pattern as {@code emitMathLog}. */
@Override
public Value emitMathTan(Value input) {
    LIRGenerator gen = getLIRGen();
    Variable result = maths.emitTan(gen, input);
    if (result == null) {
        result = gen.newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), TAN, result, asAllocatable(input), stackSlot));
    }
    return result;
}

/** Emits exp via the math intrinsic stub (no fast-path helper, unlike log/sin/cos/tan). */
@Override
public Value emitMathExp(Value input) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(input));
    AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
    getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), EXP, result, asAllocatable(input), stackSlot));
    return result;
}

/** Emits pow via the binary math intrinsic stub; result kind follows the base operand. */
@Override
public Value emitMathPow(Value input1, Value input2) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(input1));
    getLIRGen().append(new AMD64MathIntrinsicBinaryOp(getAMD64LIRGen(), POW, result, asAllocatable(input1), asAllocatable(input2)));
    return result;
}

/** Convenience downcast of the generic LIR generator to the AMD64-specific one. */
protected AMD64LIRGenerator getAMD64LIRGen() {
    return (AMD64LIRGenerator) getLIRGen();
}

/**
 * Emits a kind-dispatched load. Sub-DWORD integers are loaded sign-extending into a DWORD
 * register (internal representation of narrow values); floats use MOVSS/MOVSD.
 */
@Override
public Variable emitLoad(LIRKind kind, Value address, LIRFrameState state) {
    AMD64AddressValue loadAddress = getAMD64LIRGen().asAddressValue(address);
    Variable result = getLIRGen().newVariable(getLIRGen().toRegisterKind(kind));
    switch ((AMD64Kind) kind.getPlatformKind()) {
        case BYTE:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOVSXB, DWORD, result, loadAddress, state));
            break;
        case WORD:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOVSX, DWORD, result, loadAddress, state));
            break;
        case DWORD:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, loadAddress, state));
            break;
        case QWORD:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, loadAddress, state));
            break;
        case SINGLE:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOVSS, SS, result, loadAddress, state));
            break;
        case DOUBLE:
            getLIRGen().append(new AMD64Unary.MemoryOp(MOVSD, SD, result, loadAddress, state));
            break;
        default:
            throw GraalError.shouldNotReachHere();
    }
    return result;
}

/**
 * Stores a constant to memory directly when it fits in an immediate; otherwise falls back to
 * materializing the constant in a register and storing that. Null pointers store immediate 0;
 * patchable VM constants are only storable as 32-bit immediates; float/double constants are
 * stored via their raw bit patterns.
 */
protected void emitStoreConst(AMD64Kind kind, AMD64AddressValue address, ConstantValue value, LIRFrameState state) {
    Constant c = value.getConstant();
    if (JavaConstant.isNull(c)) {
        assert kind == AMD64Kind.DWORD || kind == AMD64Kind.QWORD;
        OperandSize size = kind == AMD64Kind.DWORD ? DWORD : QWORD;
        getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.MOV, size, address, 0, state));
        return;
    } else if (c instanceof VMConstant) {
        // only 32-bit constants can be patched
        if (kind == AMD64Kind.DWORD) {
            if (getLIRGen().target().inlineObjects || !(c instanceof JavaConstant)) {
                // if c is a JavaConstant, it's an oop, otherwise it's a metaspace constant
                assert !(c instanceof JavaConstant) || ((JavaConstant) c).getJavaKind() == JavaKind.Object;
                getLIRGen().append(new AMD64BinaryConsumer.MemoryVMConstOp(AMD64MIOp.MOV, address, (VMConstant) c, state));
                return;
            }
        }
    } else {
        JavaConstant jc = (JavaConstant) c;
        assert jc.getJavaKind().isPrimitive();

        AMD64MIOp op = AMD64MIOp.MOV;
        OperandSize size;
        long imm;

        switch (kind) {
            case BYTE:
                op = AMD64MIOp.MOVB;
                size = BYTE;
                imm = jc.asInt();
                break;
            case WORD:
                size = WORD;
                imm = jc.asInt();
                break;
            case DWORD:
                size = DWORD;
                imm = jc.asInt();
                break;
            case QWORD:
                size = QWORD;
                imm = jc.asLong();
                break;
            case SINGLE:
                // Store the raw IEEE-754 bits; there is no float-immediate store.
                size = DWORD;
                imm = Float.floatToRawIntBits(jc.asFloat());
                break;
            case DOUBLE:
                size = QWORD;
                imm = Double.doubleToRawLongBits(jc.asDouble());
                break;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind " + kind);
        }

        // MOV to memory only accepts a sign-extended 32-bit immediate, even for QWORD stores.
        if (NumUtil.isInt(imm)) {
            getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(op, size, address, (int) imm, state));
            return;
        }
    }

    // fallback: load, then store
    emitStore(kind, address, asAllocatable(value), state);
}

/** Stores a register value to memory with the MR-form move matching {@code kind}. */
protected void emitStore(AMD64Kind kind, AMD64AddressValue address, AllocatableValue value, LIRFrameState state) {
    switch (kind) {
        case BYTE:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVB, BYTE, address, value, state));
            break;
        case WORD:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, WORD, address, value, state));
            break;
        case DWORD:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, DWORD, address, value, state));
            break;
        case QWORD:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, QWORD, address, value, state));
            break;
        case SINGLE:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSS, SS, address, value, state));
            break;
        case DOUBLE:
            getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSD, SD, address, value, state));
            break;
        default:
            throw GraalError.shouldNotReachHere();
    }
}

/** Public store entry point: dispatches to the constant or register store path. */
@Override
public void emitStore(ValueKind<?> lirKind, Value address, Value input, LIRFrameState state) {
    AMD64AddressValue storeAddress = getAMD64LIRGen().asAddressValue(address);
    AMD64Kind kind = (AMD64Kind) lirKind.getPlatformKind();
    if (isConstantValue(input)) {
        emitStoreConst(kind, storeAddress, asConstantValue(input), state);
    } else {
        emitStore(kind, storeAddress, asAllocatable(input), state);
    }
}

/**
 * True when a comparison against null must use the dedicated null-register value instead of a
 * TEST against zero (only when a null register is configured and the pointer is uncompressed).
 */
private boolean mustReplaceNullWithNullRegister(Constant nullConstant) {
    /* Uncompressed null pointers only */
    return nullRegisterValue != null && JavaConstant.NULL_POINTER.equals(nullConstant);
}

/**
 * Emits a compare of {@code left} against {@code right}, choosing the cheapest encoding:
 * UCOMIS for floats, TEST for comparisons against default values, an immediate CMP for 32-bit
 * constants, patched CMP for VM constants, and register-register CMP as the fallback.
 */
@Override
public void emitCompareOp(AMD64Kind cmpKind, Variable left, Value right) {
    OperandSize size;
    switch (cmpKind) {
        case BYTE:
            size = BYTE;
            break;
        case WORD:
            size = WORD;
            break;
        case DWORD:
            size = DWORD;
            break;
        case QWORD:
            size = QWORD;
            break;
        case SINGLE:
            getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PS, left, asAllocatable(right)));
            return;
        case DOUBLE:
            getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PD, left, asAllocatable(right)));
            return;
        default:
            throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
    }

    if (isConstantValue(right)) {
        Constant c = LIRValueUtil.asConstant(right);
        if (JavaConstant.isNull(c)) {
            if (mustReplaceNullWithNullRegister(c)) {
                getLIRGen().append(new AMD64BinaryConsumer.Op(AMD64RMOp.CMP, size, left, nullRegisterValue));
            } else {
                // Compare against 0 via TEST reg,reg (shorter than CMP with immediate).
                getLIRGen().append(new AMD64BinaryConsumer.Op(TEST, size, left, left));
            }
            return;
        } else if (c instanceof VMConstant) {
            VMConstant vc = (VMConstant) c;
            if (size == DWORD && !GeneratePIC.getValue(getOptions())) {
                // 32-bit VM constants can be patched directly into the immediate field.
                getLIRGen().append(new AMD64BinaryConsumer.VMConstOp(CMP.getMIOpcode(DWORD, false), left, vc));
            } else {
                getLIRGen().append(new AMD64BinaryConsumer.DataOp(CMP.getRMOpcode(size), size, left, vc));
            }
            return;
        } else if (c instanceof JavaConstant) {
            JavaConstant jc = (JavaConstant) c;
            if (jc.isDefaultForKind()) {
                AMD64RMOp op = size == BYTE ? TESTB : TEST;
                getLIRGen().append(new AMD64BinaryConsumer.Op(op, size, left, left));
                return;
            } else if (NumUtil.is32bit(jc.asLong())) {
                getLIRGen().append(new AMD64BinaryConsumer.ConstOp(CMP, size, left, (int) jc.asLong()));
                return;
            }
        }
    }

    // fallback: load, then compare
    getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, asAllocatable(right)));
}

/**
 * Emits ROUNDSS/ROUNDSD with the given rounding-mode immediate.
 *
 * NOTE(review): OperandSize.PD is passed for both the SINGLE and DOUBLE variants —
 * presumably the size argument only selects the encoding prefix expected by the assembler
 * for these opcodes; confirm against the AMD64Assembler definitions.
 */
@Override
public Value emitRound(Value value, RoundingMode mode) {
    Variable result = getLIRGen().newVariable(LIRKind.combine(value));
    assert ((AMD64Kind) value.getPlatformKind()).isXMM();
    if (value.getPlatformKind() == AMD64Kind.SINGLE) {
        getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSS, OperandSize.PD, result, asAllocatable(value), mode.encoding));
    } else {
        getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, asAllocatable(value), mode.encoding));
    }
    return result;
}

/** True if the target CPU supports AVX. */
private boolean supportAVX() {
    TargetDescription target = getLIRGen().target();
    return ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
}

/** Register size for AVX encoding: the XMM kind's own size, or XMM for non-XMM values. */
private static AVXSize getRegisterSize(Value a) {
    AMD64Kind kind = (AMD64Kind) a.getPlatformKind();
    if (kind.isXMM()) {
        return AVXKind.getRegisterSize(kind);
    } else {
        return AVXSize.XMM;
    }
}

/** Emits a generic AVX RVM binary op into a fresh variable of {@code resultKind}. */
private Variable emitBinary(LIRKind resultKind, VexRVMOp op, Value a, Value b) {
    Variable result = getLIRGen().newVariable(resultKind);
    getLIRGen().append(new AVXBinaryOp(op, getRegisterSize(result), result, asAllocatable(a), asAllocatable(b)));
    return result;
}

}