1 /* 2 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 */ 23 24 25 26 package org.graalvm.compiler.core.amd64; 27 28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD; 29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND; 30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP; 31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.OR; 32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB; 33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR; 34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NEG; 35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NOT; 36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSF; 37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSR; 38 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.LZCNT; 39 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOV; 40 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSD; 41 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSS; 42 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX; 43 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB; 44 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD; 45 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZX; 46 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZXB; 47 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.POPCNT; 48 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TEST; 49 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TESTB; 50 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TZCNT; 51 import static 
org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL; 52 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR; 53 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR; 54 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL; 55 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR; 56 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSD; 57 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VADDSS; 58 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSD; 59 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VDIVSS; 60 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSD; 61 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VMULSS; 62 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPD; 63 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VORPS; 64 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSD; 65 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VSUBSS; 66 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPD; 67 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp.VXORPS; 68 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.BYTE; 69 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD; 70 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD; 71 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS; 72 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD; 73 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SD; 74 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.SS; 75 import static 
org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.WORD; 76 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC; 77 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue; 78 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant; 79 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue; 80 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant; 81 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM; 82 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM; 83 84 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic; 85 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp; 86 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp; 87 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp; 88 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp; 89 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp; 90 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift; 91 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; 92 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRMOp; 93 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexGeneralPurposeRVMOp; 94 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRVMOp; 95 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize; 96 import org.graalvm.compiler.asm.amd64.AVXKind; 97 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize; 98 import org.graalvm.compiler.core.common.LIRKind; 99 import org.graalvm.compiler.core.common.NumUtil; 100 import org.graalvm.compiler.core.common.calc.FloatConvert; 101 import org.graalvm.compiler.debug.GraalError; 102 import org.graalvm.compiler.lir.ConstantValue; 103 import org.graalvm.compiler.lir.LIRFrameState; 104 import org.graalvm.compiler.lir.LIRValueUtil; 105 import org.graalvm.compiler.lir.Variable; 106 import org.graalvm.compiler.lir.amd64.AMD64AddressValue; 
107 import org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FPDivRemOp; 108 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool; 109 import org.graalvm.compiler.lir.amd64.AMD64Binary; 110 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer; 111 import org.graalvm.compiler.lir.amd64.AMD64ClearRegisterOp; 112 import org.graalvm.compiler.lir.amd64.AMD64MathCosOp; 113 import org.graalvm.compiler.lir.amd64.AMD64MathExpOp; 114 import org.graalvm.compiler.lir.amd64.AMD64MathLog10Op; 115 import org.graalvm.compiler.lir.amd64.AMD64MathLogOp; 116 import org.graalvm.compiler.lir.amd64.AMD64MathPowOp; 117 import org.graalvm.compiler.lir.amd64.AMD64MathSinOp; 118 import org.graalvm.compiler.lir.amd64.AMD64MathTanOp; 119 import org.graalvm.compiler.lir.amd64.AMD64Move; 120 import org.graalvm.compiler.lir.amd64.AMD64MulDivOp; 121 import org.graalvm.compiler.lir.amd64.AMD64ShiftOp; 122 import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp; 123 import org.graalvm.compiler.lir.amd64.AMD64Unary; 124 import org.graalvm.compiler.lir.amd64.AMD64ZeroMemoryOp; 125 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary; 126 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorBinary.AVXBinaryOp; 127 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorUnary; 128 import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator; 129 130 import jdk.vm.ci.amd64.AMD64; 131 import jdk.vm.ci.amd64.AMD64.CPUFeature; 132 import jdk.vm.ci.amd64.AMD64Kind; 133 import jdk.vm.ci.code.CodeUtil; 134 import jdk.vm.ci.code.Register; 135 import jdk.vm.ci.code.RegisterValue; 136 import jdk.vm.ci.code.TargetDescription; 137 import jdk.vm.ci.meta.AllocatableValue; 138 import jdk.vm.ci.meta.Constant; 139 import jdk.vm.ci.meta.JavaConstant; 140 import jdk.vm.ci.meta.JavaKind; 141 import jdk.vm.ci.meta.PlatformKind; 142 import jdk.vm.ci.meta.VMConstant; 143 import jdk.vm.ci.meta.Value; 144 import jdk.vm.ci.meta.ValueKind; 145 146 /** 147 * This class implements the AMD64 specific 
portion of the LIR generator.
 */
public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AMD64ArithmeticLIRGeneratorTool {

    /**
     * AMD64 variable-count shifts take their count in CL; {@code emitShift} moves the shift
     * amount into this fixed RCX view before emitting the shift instruction.
     */
    private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD));

    public AMD64ArithmeticLIRGenerator(AllocatableValue nullRegisterValue) {
        this.nullRegisterValue = nullRegisterValue;
    }

    // Captured at construction; not referenced in this part of the file.
    // NOTE(review): presumably the register holding the null value used elsewhere -- confirm against its uses.
    private final AllocatableValue nullRegisterValue;

    /**
     * Emits the arithmetic negation of {@code inputVal}. Integer kinds use NEG; floating-point
     * kinds flip the sign bit by XOR-ing with a sign-bit mask (AVX or SSE form).
     */
    @Override
    public Variable emitNegate(Value inputVal) {
        AllocatableValue input = asAllocatable(inputVal);
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        boolean isAvx = supportAVX();
        switch ((AMD64Kind) input.getPlatformKind()) {
            case DWORD:
                getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input));
                break;
            case SINGLE:
                // 0x80000000 is the IEEE-754 float sign bit; XOR toggles it.
                JavaConstant floatMask = JavaConstant.forFloat(Float.intBitsToFloat(0x80000000));
                if (isAvx) {
                    getLIRGen().append(new AVXBinaryOp(VXORPS, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(floatMask))));
                } else {
                    // Trailing 16 is presumably the data-section alignment of the mask constant -- TODO confirm.
                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, floatMask, 16));
                }
                break;
            case DOUBLE:
                // 0x8000000000000000L is the IEEE-754 double sign bit.
                JavaConstant doubleMask = JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L));
                if (isAvx) {
                    getLIRGen().append(new AVXBinaryOp(VXORPD, getRegisterSize(result), result, asAllocatable(input), asAllocatable(getLIRGen().emitJavaConstant(doubleMask))));
                } else {
                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, doubleMask, 16));
                }
                break;
            default:
                throw GraalError.shouldNotReachHere(input.getPlatformKind().toString());
        }
        return result;
    }

    /**
     * Emits the bitwise complement (NOT) of {@code inputVal}; only integer kinds are supported.
     */
    @Override
    public Variable emitNot(Value inputVal) {
        AllocatableValue input = asAllocatable(inputVal);
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case DWORD:
                getLIRGen().append(new AMD64Unary.MOp(NOT, DWORD, result, input));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MOp(NOT, QWORD, result, input));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Dispatches a two-operand integer arithmetic op: a constant RHS (or constant LHS when the
     * op is commutative) uses the immediate form, otherwise the register-register form.
     */
    private Variable emitBinary(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, Value a, Value b, boolean setFlags) {
        if (isJavaConstant(b)) {
            return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(a), asConstantValue(b), setFlags);
        } else if (commutative && isJavaConstant(a)) {
            return emitBinaryConst(resultKind, op, size, commutative, asAllocatable(b), asConstantValue(a), setFlags);
        } else {
            return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, asAllocatable(a), asAllocatable(b));
        }
    }

    /**
     * Same dispatch as above for plain {@link AMD64RMOp} opcodes (e.g. SSE arithmetic); constants
     * are materialized from the data section rather than encoded as immediates.
     */
    private Variable emitBinary(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, Value a, Value b) {
        if (isJavaConstant(b)) {
            return emitBinaryConst(resultKind, op, size, asAllocatable(a), asJavaConstant(b));
        } else if (commutative && isJavaConstant(a)) {
            return emitBinaryConst(resultKind, op, size, asAllocatable(b), asJavaConstant(a));
        } else {
            return emitBinaryVar(resultKind, op, size, commutative, asAllocatable(a), asAllocatable(b));
        }
    }

    /**
     * Emits {@code op} with an immediate operand when the constant fits in 32 bits, otherwise
     * falls back to the register-register form with the constant loaded into a register.
     */
    private Variable emitBinaryConst(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, AllocatableValue a, ConstantValue b, boolean setFlags) {
        long value = b.getJavaConstant().asLong();
        if (NumUtil.isInt(value)) {
            Variable result = getLIRGen().newVariable(resultKind);
            int constant = (int) value;

            if (!setFlags) {
                // ADD/SUB of +-1 can use INC/DEC, but only when the caller does not need flags
                // (INC/DEC do not update CF the way ADD/SUB do -- see the Intel SDM).
                AMD64MOp mop = getMOp(op, constant);
                if (mop != null) {
                    getLIRGen().append(new AMD64Unary.MOp(mop, size, result, a));
                    return result;
                }
            }

            getLIRGen().append(new AMD64Binary.ConstOp(op, size, result, a, constant));
            return result;
        } else {
            return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, a, asAllocatable(b));
        }
    }

    /**
     * Maps ADD/SUB of +-1 to the single-operand INC/DEC opcodes; returns {@code null} when no
     * such form applies.
     */
    private static AMD64MOp getMOp(AMD64BinaryArithmetic op, int constant) {
        if (constant == 1) {
            if (op.equals(AMD64BinaryArithmetic.ADD)) {
                return AMD64MOp.INC;
            }
            if (op.equals(AMD64BinaryArithmetic.SUB)) {
                return AMD64MOp.DEC;
            }
        } else if (constant == -1) {
            if (op.equals(AMD64BinaryArithmetic.ADD)) {
                return AMD64MOp.DEC;
            }
            if (op.equals(AMD64BinaryArithmetic.SUB)) {
                return AMD64MOp.INC;
            }
        }
        return null;
    }

    // Emits op with the constant placed in the data section (DataTwoOp), e.g. for SSE operands.
    private Variable emitBinaryConst(LIRKind resultKind, AMD64RMOp op, OperandSize size, AllocatableValue a, JavaConstant b) {
        Variable result = getLIRGen().newVariable(resultKind);
        getLIRGen().append(new AMD64Binary.DataTwoOp(op, size, result, a, b));
        return result;
    }

    // Register-register form; commutative ops get the variant that lets the allocator swap operands.
    private Variable emitBinaryVar(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, AllocatableValue a, AllocatableValue b) {
        Variable result = getLIRGen().newVariable(resultKind);
        if (commutative) {
            getLIRGen().append(new AMD64Binary.CommutativeTwoOp(op, size, result, a, b));
        } else {
            getLIRGen().append(new AMD64Binary.TwoOp(op, size, result, a, b));
        }
        return result;
    }

    @Override
    protected boolean isNumericInteger(PlatformKind kind) {
        return ((AMD64Kind) kind).isInteger();
    }

    // Computes base + offset with LEA (which does not modify flags -- see the Intel SDM).
    private Variable emitBaseOffsetLea(LIRKind resultKind, Value base, int offset, OperandSize size) {
        Variable result = getLIRGen().newVariable(resultKind);
        AMD64AddressValue address = new AMD64AddressValue(resultKind, asAllocatable(base), offset);
        getLIRGen().append(new AMD64Move.LeaOp(result, address, size));
        return result;
    }

    /**
     * Emits {@code a + b}. Integer adds of a 32-bit constant displacement use LEA when flags are
     * not required (and the constant is not +-1, which the INC/DEC path in
     * {@code emitBinaryConst} serves); floats use AVX or SSE scalar adds.
     */
    @Override
    public Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) {
        boolean isAvx = supportAVX();
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                if (isJavaConstant(b) && !setFlags) {
                    long displacement = asJavaConstant(b).asLong();
                    if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
                        return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.DWORD);
                    }
                }
                return emitBinary(resultKind, ADD, DWORD, true, a, b, setFlags);
            case QWORD:
                if (isJavaConstant(b) && !setFlags) {
                    long displacement = asJavaConstant(b).asLong();
                    if (NumUtil.isInt(displacement) && displacement != 1 && displacement != -1) {
                        return emitBaseOffsetLea(resultKind, a, (int) displacement, OperandSize.QWORD);
                    }
                }
                return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VADDSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VADDSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Emits {@code a - b} (non-commutative). */
    @Override
    public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) {
        boolean isAvx = supportAVX();
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags);
            case QWORD:
                return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VSUBSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VSUBSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.SUB, SD, false, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits an integer multiply with a constant operand. A 32-bit constant uses the IMUL
     * immediate form (sign-extended imm8 when it fits in a byte); larger constants are loaded
     * into a register.
     */
    private Variable emitIMULConst(OperandSize size, AllocatableValue a, ConstantValue b) {
        long value = b.getJavaConstant().asLong();
        if (NumUtil.isInt(value)) {
            int imm = (int) value;
            AMD64RMIOp op;
            if (NumUtil.isByte(imm)) {
                op = AMD64RMIOp.IMUL_SX;
            } else {
                op = AMD64RMIOp.IMUL;
            }

            Variable ret = getLIRGen().newVariable(LIRKind.combine(a, b));
            getLIRGen().append(new AMD64Binary.RMIOp(op, size, ret, a, imm));
            return ret;
        } else {
            return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, a, asAllocatable(b));
        }
    }

    // Multiplication is commutative, so a constant on either side takes the immediate path.
    private Variable emitIMUL(OperandSize size, Value a, Value b) {
        if (isJavaConstant(b)) {
            return emitIMULConst(size, asAllocatable(a), asConstantValue(b));
        } else if (isJavaConstant(a)) {
            return emitIMULConst(size, asAllocatable(b), asConstantValue(a));
        } else {
            return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, asAllocatable(a), asAllocatable(b));
        }
    }

    /** Emits {@code a * b}. The {@code setFlags} hint is not used on this path. */
    @Override
    public Variable emitMul(Value a, Value b, boolean setFlags) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitIMUL(DWORD, a, b);
            case QWORD:
                return emitIMUL(QWORD, a, b);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VMULSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VMULSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    // Copies v into the given fixed register and returns that register value.
    private RegisterValue moveToReg(Register reg, Value v) {
        RegisterValue ret = reg.asValue(v.getValueKind());
        getLIRGen().emitMove(ret, v);
        return ret;
    }

    /**
     * Emits a one-operand MUL/IMUL with {@code a} forced into rax and returns the high half of
     * the double-width product.
     */
    private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, Value b) {
        AMD64MulDivOp mulHigh = getLIRGen().append(new AMD64MulDivOp(opcode, size, LIRKind.combine(a, b), moveToReg(AMD64.rax, a), asAllocatable(b)));
        return getLIRGen().emitMove(mulHigh.getHighResult());
    }

    /** Signed high-half multiply (IMUL). */
    @Override
    public Value emitMulHigh(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitMulHigh(AMD64MOp.IMUL, DWORD, a, b);
            case QWORD:
                return emitMulHigh(AMD64MOp.IMUL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Unsigned high-half multiply (MUL). */
    @Override
    public Value emitUMulHigh(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitMulHigh(AMD64MOp.MUL, DWORD, a, b);
            case QWORD:
                return emitMulHigh(AMD64MOp.MUL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits {@code op} with a memory operand; {@code state} describes the deopt state used if
     * the memory access faults.
     */
    public Value emitBinaryMemory(AMD64RMOp op, OperandSize size, AllocatableValue a, AMD64AddressValue location, LIRFrameState state) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(a));
        getLIRGen().append(new AMD64Binary.MemoryTwoOp(op, size, result, a, location, state));
        return result;
    }

    // Load-and-convert in one instruction: op reads from address and writes a value of kind.
    protected Value emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AMD64AddressValue address, LIRFrameState state) {
        Variable result = getLIRGen().newVariable(LIRKind.value(kind));
        getLIRGen().append(new AMD64Unary.MemoryOp(op, size, result, address, state));
        return result;
    }

    protected Value emitZeroExtendMemory(AMD64Kind memoryKind, int resultBits, AMD64AddressValue address, LIRFrameState state) {
        // Issue a zero extending load of the proper bit size and set the result to
        // the proper kind.
        Variable result = getLIRGen().newVariable(LIRKind.value(resultBits <= 32 ? AMD64Kind.DWORD : AMD64Kind.QWORD));
        switch (memoryKind) {
            case BYTE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVZXB, DWORD, result, address, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVZX, DWORD, result, address, state));
                break;
            case DWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, address, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, address, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Signed divide: sign-extends the dividend from rax into the high register, then emits IDIV.
     * The returned op exposes both quotient and remainder.
     */
    private AMD64MulDivOp emitIDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
        LIRKind kind = LIRKind.combine(a, b);

        AMD64SignExtendOp sx = getLIRGen().append(new AMD64SignExtendOp(size, kind, moveToReg(AMD64.rax, a)));
        return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.IDIV, size, kind, sx.getHighResult(), sx.getLowResult(), asAllocatable(b), state));
    }

    /**
     * Unsigned divide: zeroes rdx (the high half of the dividend), then emits DIV with the low
     * half in rax.
     */
    private AMD64MulDivOp emitDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
        LIRKind kind = LIRKind.combine(a, b);

        RegisterValue rax = moveToReg(AMD64.rax, a);
        RegisterValue rdx = AMD64.rdx.asValue(kind);
        getLIRGen().append(new AMD64ClearRegisterOp(size, rdx));
        return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.DIV, size, kind, rdx, rax, asAllocatable(b), state));
    }

    /** Returns {quotient, remainder} of the signed division {@code a / b} from a single IDIV. */
    public Value[] emitSignedDivRem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitIDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitIDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
    }

    /** Returns {quotient, remainder} of the unsigned division {@code a / b} from a single DIV. */
    public Value[] emitUnsignedDivRem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
    }

    /** Emits {@code a / b}: IDIV quotient for integers, scalar SSE/AVX divide for floats. */
    @Override
    public Value emitDiv(Value a, Value b, LIRFrameState state) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
                return getLIRGen().emitMove(op.getQuotient());
            case QWORD:
                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
                return getLIRGen().emitMove(lop.getQuotient());
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VDIVSS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VDIVSD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits {@code a % b}: IDIV remainder for integers; floats use the dedicated FREM/DREM
     * stub ops.
     */
    @Override
    public Value emitRem(Value a, Value b, LIRFrameState state) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
                return getLIRGen().emitMove(op.getRemainder());
            case QWORD:
                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
                return getLIRGen().emitMove(lop.getRemainder());
            case SINGLE: {
                Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
                getLIRGen().append(new FPDivRemOp(FREM, result, getLIRGen().load(a), getLIRGen().load(b)));
                return result;
            }
            case DOUBLE: {
                Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
                getLIRGen().append(new FPDivRemOp(DREM, result, getLIRGen().load(a), getLIRGen().load(b)));
                return result;
            }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Unsigned integer divide (quotient of DIV). */
    @Override
    public Variable emitUDiv(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return getLIRGen().emitMove(op.getQuotient());
    }

    /** Unsigned integer remainder (remainder of DIV). */
    @Override
    public Variable emitURem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return getLIRGen().emitMove(op.getRemainder());
    }

    /** Emits {@code a & b}; float kinds use the packed SSE AND (no AVX path here). */
    @Override
    public Variable emitAnd(Value a, Value b) {
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, AND, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, AND, QWORD, true, a, b, false);
            case SINGLE:
                return emitBinary(resultKind, SSEOp.AND, PS, true, a, b);
            case DOUBLE:
                return emitBinary(resultKind, SSEOp.AND, PD, true, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Emits {@code a | b}. */
    @Override
    public Variable emitOr(Value a, Value b) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, OR, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, OR, QWORD, true, a, b, false);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VORPS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.OR, PS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VORPD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.OR, PD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Emits {@code a ^ b}. */
    @Override
    public Variable emitXor(Value a, Value b) {
        boolean isAvx = supportAVX();
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, XOR, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, XOR, QWORD, true, a, b, false);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, VXORPS, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, VXORPD, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a shift/rotate. Shift-by-1 uses the dedicated one-bit form, other constants the
     * immediate form, and a variable count is moved into RCX (CL) first.
     */
    private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(a, b).changeType(a.getPlatformKind()));
        AllocatableValue input = asAllocatable(a);
        if (isJavaConstant(b)) {
            JavaConstant c = asJavaConstant(b);
            if (c.asLong() == 1) {
                getLIRGen().append(new AMD64Unary.MOp(op.m1Op, size, result, input));
            } else {
                /*
                 * c needs to be masked here, because shifts with immediate expect a byte.
                 */
                getLIRGen().append(new AMD64Binary.ConstOp(op.miOp, size, result, input, (byte) c.asLong()));
            }
        } else {
            getLIRGen().emitMove(RCX_I, b);
            getLIRGen().append(new AMD64ShiftOp(op.mcOp, size, result, input, RCX_I));
        }
        return result;
    }

    /** Shift left (SHL). */
    @Override
    public Variable emitShl(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SHL, DWORD, a, b);
            case QWORD:
                return emitShift(SHL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Signed (arithmetic) shift right: SAR propagates the sign bit. */
    @Override
    public Variable emitShr(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SAR, DWORD, a, b);
            case QWORD:
                return emitShift(SAR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Unsigned (logical) shift right: SHR shifts in zeroes. */
    @Override
    public Variable emitUShr(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SHR, DWORD, a, b);
            case QWORD:
                return emitShift(SHR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Rotate left (ROL). */
    public Variable emitRol(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(ROL, DWORD, a, b);
            case QWORD:
                return emitShift(ROL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /** Rotate right (ROR). */
    @Override
    public Variable emitRor(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(ROR, DWORD, a, b);
            case QWORD:
                return emitShift(ROR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    // Applies a register-to-register conversion opcode, producing a fresh variable of kind.
    private AllocatableValue emitConvertOp(LIRKind kind, AMD64RMOp op, OperandSize size, Value input) {
        Variable result = getLIRGen().newVariable(kind);
        getLIRGen().append(new AMD64Unary.RMOp(op, size, result, asAllocatable(input)));
        return result;
    }
private AllocatableValue emitConvertOp(LIRKind kind, AMD64MROp op, OperandSize size, Value input) {
    // MR variant of the conversion helper (e.g. XMM -> general-purpose moves like MOVD/MOVQ).
    Variable result = getLIRGen().newVariable(kind);
    getLIRGen().append(new AMD64Unary.MROp(op, size, result, asAllocatable(input)));
    return result;
}

    /**
     * Reinterprets the bits of {@code inputVal} as the platform kind of {@code to} without any
     * value conversion. Equal kinds are a no-op; only equal-width int<->float pairs are handled,
     * via MOVD/MOVQ.
     */
    @Override
    public Value emitReinterpret(LIRKind to, Value inputVal) {
        ValueKind<?> from = inputVal.getValueKind();
        if (to.equals(from)) {
            return inputVal;
        }

        AllocatableValue input = asAllocatable(inputVal);
        /*
         * Conversions between integer to floating point types require moves between CPU and FPU
         * registers.
         */
        AMD64Kind fromKind = (AMD64Kind) from.getPlatformKind();
        switch ((AMD64Kind) to.getPlatformKind()) {
            case DWORD:
                switch (fromKind) {
                    case SINGLE:
                        return emitConvertOp(to, AMD64MROp.MOVD, DWORD, input);
                }
                break;
            case QWORD:
                switch (fromKind) {
                    case DOUBLE:
                        return emitConvertOp(to, AMD64MROp.MOVQ, QWORD, input);
                }
                break;
            case SINGLE:
                switch (fromKind) {
                    case DWORD:
                        return emitConvertOp(to, AMD64RMOp.MOVD, DWORD, input);
                }
                break;
            case DOUBLE:
                switch (fromKind) {
                    case QWORD:
                        return emitConvertOp(to, AMD64RMOp.MOVQ, QWORD, input);
                }
                break;
        }
        // Any unsupported kind pairing falls through the switches to here.
        throw GraalError.shouldNotReachHere();
    }

    /**
     * Emits a value-converting move between float and integer kinds. Float-to-int uses the
     * truncating CVTTSx2SI forms (round toward zero).
     */
    @Override
    public Value emitFloatConvert(FloatConvert op, Value input) {
        switch (op) {
            case D2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSD2SS, SD, input);
            case D2I:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSD2SI, DWORD, input);
            case D2L:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSD2SI, QWORD, input);
            case F2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSS2SD, SS, input);
            case F2I:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSS2SI, DWORD, input);
            case F2L:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSS2SI, QWORD, input);
            case I2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, DWORD, input);
            case I2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, DWORD, input);
            case L2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, QWORD, input);
            case L2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, QWORD, input);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Narrows {@code inputVal} to {@code bits}. Only QWORD->(<=32 bit) needs an instruction (a
     * 32-bit MOV); everything else is already representable and returned unchanged.
     */
    @Override
    public Value emitNarrow(Value inputVal, int bits) {
        if (inputVal.getPlatformKind() == AMD64Kind.QWORD && bits <= 32) {
            // TODO make it possible to reinterpret Long as Int in LIR without move
            return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), AMD64RMOp.MOV, DWORD, inputVal);
        } else {
            return inputVal;
        }
    }

    /**
     * Sign-extends {@code inputVal} from {@code fromBits} to {@code toBits} using the MOVSX
     * family (MOVSXB/MOVSX/MOVSXD depending on the source width).
     */
    @Override
    public Value emitSignExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (toBits > 32) {
            // sign extend to 64 bits
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXB, QWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSX, QWORD, inputVal);
                case 32:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXD, QWORD, inputVal);
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        } else {
            // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSXB, DWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSX, DWORD, inputVal);
                case 32:
                    return inputVal;
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        }
    }

    /**
     * Zero-extends {@code inputVal} from {@code fromBits} to {@code toBits}. Widths above 32
     * bits are handled by AND-ing with a mask of the low {@code fromBits} bits.
     */
    @Override
    public Value emitZeroExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (fromBits > 32) {
            assert inputVal.getPlatformKind() == AMD64Kind.QWORD;
            Variable result = getLIRGen().newVariable(LIRKind.combine(inputVal));
            long mask = CodeUtil.mask(fromBits);
            getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(QWORD), QWORD, result, asAllocatable(inputVal), JavaConstant.forLong(mask)));
            return result;
        } else {
            LIRKind resultKind = LIRKind.combine(inputVal);
            if (toBits > 32) {
                resultKind = resultKind.changeType(AMD64Kind.QWORD);
            } else {
                resultKind = resultKind.changeType(AMD64Kind.DWORD);
            }

            /*
             * Always emit DWORD operations, even if the resultKind is Long. On AMD64, all DWORD
             * operations implicitly set the upper half of the register to 0, which is what we want
             * anyway. Compared to the QWORD operations, the encoding of the DWORD operations is
             * sometimes one byte shorter.
911 */ 912 switch (fromBits) { 913 case 8: 914 return emitConvertOp(resultKind, MOVZXB, DWORD, inputVal); 915 case 16: 916 return emitConvertOp(resultKind, MOVZX, DWORD, inputVal); 917 case 32: 918 return emitConvertOp(resultKind, MOV, DWORD, inputVal); 919 } 920 921 // odd bit count, fall back on manual masking 922 Variable result = getLIRGen().newVariable(resultKind); 923 JavaConstant mask; 924 if (toBits > 32) { 925 mask = JavaConstant.forLong(CodeUtil.mask(fromBits)); 926 } else { 927 mask = JavaConstant.forInt((int) CodeUtil.mask(fromBits)); 928 } 929 getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(DWORD), DWORD, result, asAllocatable(inputVal), mask)); 930 return result; 931 } 932 } 933 934 @Override 935 public Variable emitBitCount(Value value) { 936 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 937 assert ((AMD64Kind) value.getPlatformKind()).isInteger(); 938 if (value.getPlatformKind() == AMD64Kind.QWORD) { 939 getLIRGen().append(new AMD64Unary.RMOp(POPCNT, QWORD, result, asAllocatable(value))); 940 } else { 941 getLIRGen().append(new AMD64Unary.RMOp(POPCNT, DWORD, result, asAllocatable(value))); 942 } 943 return result; 944 } 945 946 @Override 947 public Variable emitBitScanForward(Value value) { 948 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 949 getLIRGen().append(new AMD64Unary.RMOp(BSF, QWORD, result, asAllocatable(value))); 950 return result; 951 } 952 953 @Override 954 public Variable emitBitScanReverse(Value value) { 955 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 956 assert ((AMD64Kind) value.getPlatformKind()).isInteger(); 957 if (value.getPlatformKind() == AMD64Kind.QWORD) { 958 getLIRGen().append(new AMD64Unary.RMOp(BSR, QWORD, result, asAllocatable(value))); 959 } else { 960 getLIRGen().append(new AMD64Unary.RMOp(BSR, DWORD, result, asAllocatable(value))); 961 } 962 return result; 
963 } 964 965 @Override 966 public Value emitCountLeadingZeros(Value value) { 967 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 968 assert ((AMD64Kind) value.getPlatformKind()).isInteger(); 969 if (value.getPlatformKind() == AMD64Kind.QWORD) { 970 getLIRGen().append(new AMD64Unary.RMOp(LZCNT, QWORD, result, asAllocatable(value))); 971 } else { 972 getLIRGen().append(new AMD64Unary.RMOp(LZCNT, DWORD, result, asAllocatable(value))); 973 } 974 return result; 975 } 976 977 @Override 978 public Value emitCountTrailingZeros(Value value) { 979 Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD)); 980 assert ((AMD64Kind) value.getPlatformKind()).isInteger(); 981 if (value.getPlatformKind() == AMD64Kind.QWORD) { 982 getLIRGen().append(new AMD64Unary.RMOp(TZCNT, QWORD, result, asAllocatable(value))); 983 } else { 984 getLIRGen().append(new AMD64Unary.RMOp(TZCNT, DWORD, result, asAllocatable(value))); 985 } 986 return result; 987 } 988 989 @Override 990 public Value emitLogicalAndNot(Value value1, Value value2) { 991 Variable result = getLIRGen().newVariable(LIRKind.combine(value1, value2)); 992 993 if (value1.getPlatformKind() == AMD64Kind.QWORD) { 994 getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.QWORD, result, asAllocatable(value1), asAllocatable(value2))); 995 } else { 996 getLIRGen().append(new AMD64VectorBinary.AVXBinaryOp(VexGeneralPurposeRVMOp.ANDN, AVXSize.DWORD, result, asAllocatable(value1), asAllocatable(value2))); 997 } 998 return result; 999 } 1000 1001 @Override 1002 public Value emitLowestSetIsolatedBit(Value value) { 1003 Variable result = getLIRGen().newVariable(LIRKind.combine(value)); 1004 1005 if (value.getPlatformKind() == AMD64Kind.QWORD) { 1006 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.QWORD, result, asAllocatable(value))); 1007 } else { 1008 getLIRGen().append(new 
AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSI, AVXSize.DWORD, result, asAllocatable(value))); 1009 } 1010 1011 return result; 1012 } 1013 1014 @Override 1015 public Value emitGetMaskUpToLowestSetBit(Value value) { 1016 Variable result = getLIRGen().newVariable(LIRKind.combine(value)); 1017 1018 if (value.getPlatformKind() == AMD64Kind.QWORD) { 1019 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.QWORD, result, asAllocatable(value))); 1020 } else { 1021 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSMSK, AVXSize.DWORD, result, asAllocatable(value))); 1022 } 1023 1024 return result; 1025 } 1026 1027 @Override 1028 public Value emitResetLowestSetBit(Value value) { 1029 Variable result = getLIRGen().newVariable(LIRKind.combine(value)); 1030 1031 if (value.getPlatformKind() == AMD64Kind.QWORD) { 1032 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.QWORD, result, asAllocatable(value))); 1033 } else { 1034 getLIRGen().append(new AMD64VectorUnary.AVXUnaryOp(VexGeneralPurposeRMOp.BLSR, AVXSize.DWORD, result, asAllocatable(value))); 1035 } 1036 1037 return result; 1038 } 1039 1040 @Override 1041 public Value emitMathAbs(Value input) { 1042 Variable result = getLIRGen().newVariable(LIRKind.combine(input)); 1043 switch ((AMD64Kind) input.getPlatformKind()) { 1044 case SINGLE: 1045 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PS, result, asAllocatable(input), JavaConstant.forFloat(Float.intBitsToFloat(0x7FFFFFFF)), 16)); 1046 break; 1047 case DOUBLE: 1048 getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PD, result, asAllocatable(input), JavaConstant.forDouble(Double.longBitsToDouble(0x7FFFFFFFFFFFFFFFL)), 16)); 1049 break; 1050 default: 1051 throw GraalError.shouldNotReachHere(); 1052 } 1053 return result; 1054 } 1055 1056 @Override 1057 public Value emitMathSqrt(Value input) { 1058 Variable result = 
getLIRGen().newVariable(LIRKind.combine(input)); 1059 switch ((AMD64Kind) input.getPlatformKind()) { 1060 case SINGLE: 1061 getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SS, result, asAllocatable(input))); 1062 break; 1063 case DOUBLE: 1064 getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SD, result, asAllocatable(input))); 1065 break; 1066 default: 1067 throw GraalError.shouldNotReachHere(); 1068 } 1069 return result; 1070 } 1071 1072 @Override 1073 public Value emitMathLog(Value input, boolean base10) { 1074 if (base10) { 1075 return new AMD64MathLog10Op().emitLIRWrapper(getLIRGen(), input); 1076 } else { 1077 return new AMD64MathLogOp().emitLIRWrapper(getLIRGen(), input); 1078 } 1079 } 1080 1081 @Override 1082 public Value emitMathCos(Value input) { 1083 return new AMD64MathCosOp().emitLIRWrapper(getLIRGen(), input); 1084 } 1085 1086 @Override 1087 public Value emitMathSin(Value input) { 1088 return new AMD64MathSinOp().emitLIRWrapper(getLIRGen(), input); 1089 } 1090 1091 @Override 1092 public Value emitMathTan(Value input) { 1093 return new AMD64MathTanOp().emitLIRWrapper(getLIRGen(), input); 1094 } 1095 1096 @Override 1097 public Value emitMathExp(Value input) { 1098 return new AMD64MathExpOp().emitLIRWrapper(getLIRGen(), input); 1099 } 1100 1101 @Override 1102 public Value emitMathPow(Value x, Value y) { 1103 return new AMD64MathPowOp().emitLIRWrapper(getLIRGen(), x, y); 1104 } 1105 1106 @Override 1107 public void emitZeroMemory(Value address, Value length) { 1108 RegisterValue lengthReg = moveToReg(AMD64.rcx, length); 1109 getLIRGen().append(new AMD64ZeroMemoryOp(getAMD64LIRGen().asAddressValue(address), lengthReg)); 1110 } 1111 1112 protected AMD64LIRGenerator getAMD64LIRGen() { 1113 return (AMD64LIRGenerator) getLIRGen(); 1114 } 1115 1116 @Override 1117 public Variable emitLoad(LIRKind kind, Value address, LIRFrameState state) { 1118 AMD64AddressValue loadAddress = getAMD64LIRGen().asAddressValue(address); 1119 Variable result = 
getLIRGen().newVariable(getLIRGen().toRegisterKind(kind)); 1120 switch ((AMD64Kind) kind.getPlatformKind()) { 1121 case BYTE: 1122 getLIRGen().append(new AMD64Unary.MemoryOp(MOVSXB, DWORD, result, loadAddress, state)); 1123 break; 1124 case WORD: 1125 getLIRGen().append(new AMD64Unary.MemoryOp(MOVSX, DWORD, result, loadAddress, state)); 1126 break; 1127 case DWORD: 1128 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, loadAddress, state)); 1129 break; 1130 case QWORD: 1131 getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, loadAddress, state)); 1132 break; 1133 case SINGLE: 1134 getLIRGen().append(new AMD64Unary.MemoryOp(MOVSS, SS, result, loadAddress, state)); 1135 break; 1136 case DOUBLE: 1137 getLIRGen().append(new AMD64Unary.MemoryOp(MOVSD, SD, result, loadAddress, state)); 1138 break; 1139 default: 1140 throw GraalError.shouldNotReachHere(); 1141 } 1142 return result; 1143 } 1144 1145 protected void emitStoreConst(AMD64Kind kind, AMD64AddressValue address, ConstantValue value, LIRFrameState state) { 1146 Constant c = value.getConstant(); 1147 if (JavaConstant.isNull(c)) { 1148 assert kind == AMD64Kind.DWORD || kind == AMD64Kind.QWORD; 1149 OperandSize size = kind == AMD64Kind.DWORD ? 
DWORD : QWORD; 1150 getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.MOV, size, address, 0, state)); 1151 return; 1152 } else if (c instanceof VMConstant) { 1153 // only 32-bit constants can be patched 1154 if (kind == AMD64Kind.DWORD) { 1155 if (getLIRGen().target().inlineObjects || !(c instanceof JavaConstant)) { 1156 // if c is a JavaConstant, it's an oop, otherwise it's a metaspace constant 1157 assert !(c instanceof JavaConstant) || ((JavaConstant) c).getJavaKind() == JavaKind.Object; 1158 getLIRGen().append(new AMD64BinaryConsumer.MemoryVMConstOp(AMD64MIOp.MOV, address, (VMConstant) c, state)); 1159 return; 1160 } 1161 } 1162 } else { 1163 JavaConstant jc = (JavaConstant) c; 1164 assert jc.getJavaKind().isPrimitive(); 1165 1166 AMD64MIOp op = AMD64MIOp.MOV; 1167 OperandSize size; 1168 long imm; 1169 1170 switch (kind) { 1171 case BYTE: 1172 op = AMD64MIOp.MOVB; 1173 size = BYTE; 1174 imm = jc.asInt(); 1175 break; 1176 case WORD: 1177 size = WORD; 1178 imm = jc.asInt(); 1179 break; 1180 case DWORD: 1181 size = DWORD; 1182 imm = jc.asInt(); 1183 break; 1184 case QWORD: 1185 size = QWORD; 1186 imm = jc.asLong(); 1187 break; 1188 case SINGLE: 1189 size = DWORD; 1190 imm = Float.floatToRawIntBits(jc.asFloat()); 1191 break; 1192 case DOUBLE: 1193 size = QWORD; 1194 imm = Double.doubleToRawLongBits(jc.asDouble()); 1195 break; 1196 default: 1197 throw GraalError.shouldNotReachHere("unexpected kind " + kind); 1198 } 1199 1200 if (NumUtil.isInt(imm)) { 1201 getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(op, size, address, (int) imm, state)); 1202 return; 1203 } 1204 } 1205 1206 // fallback: load, then store 1207 emitStore(kind, address, asAllocatable(value), state); 1208 } 1209 1210 protected void emitStore(AMD64Kind kind, AMD64AddressValue address, AllocatableValue value, LIRFrameState state) { 1211 switch (kind) { 1212 case BYTE: 1213 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVB, BYTE, address, value, state)); 1214 
break; 1215 case WORD: 1216 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, WORD, address, value, state)); 1217 break; 1218 case DWORD: 1219 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, DWORD, address, value, state)); 1220 break; 1221 case QWORD: 1222 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, QWORD, address, value, state)); 1223 break; 1224 case SINGLE: 1225 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSS, SS, address, value, state)); 1226 break; 1227 case DOUBLE: 1228 getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSD, SD, address, value, state)); 1229 break; 1230 default: 1231 throw GraalError.shouldNotReachHere(); 1232 } 1233 } 1234 1235 @Override 1236 public void emitStore(ValueKind<?> lirKind, Value address, Value input, LIRFrameState state) { 1237 AMD64AddressValue storeAddress = getAMD64LIRGen().asAddressValue(address); 1238 AMD64Kind kind = (AMD64Kind) lirKind.getPlatformKind(); 1239 if (isConstantValue(input)) { 1240 emitStoreConst(kind, storeAddress, asConstantValue(input), state); 1241 } else { 1242 emitStore(kind, storeAddress, asAllocatable(input), state); 1243 } 1244 } 1245 1246 private boolean mustReplaceNullWithNullRegister(Constant nullConstant) { 1247 /* Uncompressed null pointers only */ 1248 return nullRegisterValue != null && JavaConstant.NULL_POINTER.equals(nullConstant); 1249 } 1250 1251 @Override 1252 public void emitCompareOp(AMD64Kind cmpKind, Variable left, Value right) { 1253 OperandSize size; 1254 switch (cmpKind) { 1255 case BYTE: 1256 size = BYTE; 1257 break; 1258 case WORD: 1259 size = WORD; 1260 break; 1261 case DWORD: 1262 size = DWORD; 1263 break; 1264 case QWORD: 1265 size = QWORD; 1266 break; 1267 case SINGLE: 1268 getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PS, left, asAllocatable(right))); 1269 return; 1270 case DOUBLE: 1271 getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PD, left, 
asAllocatable(right))); 1272 return; 1273 default: 1274 throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind); 1275 } 1276 1277 if (isConstantValue(right)) { 1278 Constant c = LIRValueUtil.asConstant(right); 1279 if (JavaConstant.isNull(c)) { 1280 if (mustReplaceNullWithNullRegister(c)) { 1281 getLIRGen().append(new AMD64BinaryConsumer.Op(AMD64RMOp.CMP, size, left, nullRegisterValue)); 1282 } else { 1283 getLIRGen().append(new AMD64BinaryConsumer.Op(TEST, size, left, left)); 1284 } 1285 return; 1286 } else if (c instanceof VMConstant) { 1287 VMConstant vc = (VMConstant) c; 1288 if (size == DWORD && !GeneratePIC.getValue(getOptions())) { 1289 getLIRGen().append(new AMD64BinaryConsumer.VMConstOp(CMP.getMIOpcode(DWORD, false), left, vc)); 1290 } else { 1291 getLIRGen().append(new AMD64BinaryConsumer.DataOp(CMP.getRMOpcode(size), size, left, vc)); 1292 } 1293 return; 1294 } else if (c instanceof JavaConstant) { 1295 JavaConstant jc = (JavaConstant) c; 1296 if (jc.isDefaultForKind()) { 1297 AMD64RMOp op = size == BYTE ? 
TESTB : TEST; 1298 getLIRGen().append(new AMD64BinaryConsumer.Op(op, size, left, left)); 1299 return; 1300 } else if (NumUtil.is32bit(jc.asLong())) { 1301 getLIRGen().append(new AMD64BinaryConsumer.ConstOp(CMP, size, left, (int) jc.asLong())); 1302 return; 1303 } 1304 } 1305 } 1306 1307 // fallback: load, then compare 1308 getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, asAllocatable(right))); 1309 } 1310 1311 @Override 1312 public Value emitRound(Value value, RoundingMode mode) { 1313 Variable result = getLIRGen().newVariable(LIRKind.combine(value)); 1314 assert ((AMD64Kind) value.getPlatformKind()).isXMM(); 1315 if (value.getPlatformKind() == AMD64Kind.SINGLE) { 1316 getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSS, OperandSize.PD, result, asAllocatable(value), mode.encoding)); 1317 } else { 1318 getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, asAllocatable(value), mode.encoding)); 1319 } 1320 return result; 1321 } 1322 1323 private boolean supportAVX() { 1324 TargetDescription target = getLIRGen().target(); 1325 return ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX); 1326 } 1327 1328 private static AVXSize getRegisterSize(Value a) { 1329 AMD64Kind kind = (AMD64Kind) a.getPlatformKind(); 1330 if (kind.isXMM()) { 1331 return AVXKind.getRegisterSize(kind); 1332 } else { 1333 return AVXSize.XMM; 1334 } 1335 } 1336 1337 private Variable emitBinary(LIRKind resultKind, VexRVMOp op, Value a, Value b) { 1338 Variable result = getLIRGen().newVariable(resultKind); 1339 getLIRGen().append(new AVXBinaryOp(op, getRegisterSize(result), result, asAllocatable(a), asAllocatable(b))); 1340 return result; 1341 } 1342 1343 }