1 /* 2 * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 */ 23 24 package org.graalvm.compiler.core.amd64; 25 26 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.ADD; 27 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.AND; 28 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP; 29 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.OR; 30 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.SUB; 31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR; 32 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NEG; 33 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp.NOT; 34 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSF; 35 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.BSR; 36 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.LZCNT; 37 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOV; 38 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSD; 39 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSS; 40 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSX; 41 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXB; 42 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVSXD; 43 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZX; 44 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.MOVZXB; 45 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.POPCNT; 46 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TEST; 47 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TESTB; 48 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp.TZCNT; 49 import static 
org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROL; 50 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.ROR; 51 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SAR; 52 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHL; 53 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift.SHR; 54 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.BYTE; 55 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.DWORD; 56 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PD; 57 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.PS; 58 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.QWORD; 59 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.SD; 60 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.SS; 61 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize.WORD; 62 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC; 63 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue; 64 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant; 65 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue; 66 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant; 67 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM; 68 import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM; 69 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.COS; 70 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG; 71 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG10; 72 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.SIN; 73 import static 
org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.TAN; 74 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.EXP; 75 import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp.BinaryIntrinsicOpcode.POW; 76 77 import org.graalvm.compiler.asm.NumUtil; 78 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic; 79 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp; 80 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp; 81 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MROp; 82 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMIOp; 83 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp; 84 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RRMOp; 85 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift; 86 import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize; 87 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp; 88 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AVXOp; 89 import org.graalvm.compiler.core.common.LIRKind; 90 import org.graalvm.compiler.core.common.calc.FloatConvert; 91 import org.graalvm.compiler.debug.GraalError; 92 import org.graalvm.compiler.lir.ConstantValue; 93 import org.graalvm.compiler.lir.LIRFrameState; 94 import org.graalvm.compiler.lir.LIRValueUtil; 95 import org.graalvm.compiler.lir.Variable; 96 import org.graalvm.compiler.lir.amd64.AMD64AddressValue; 97 import org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FPDivRemOp; 98 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool; 99 import org.graalvm.compiler.lir.amd64.AMD64Binary; 100 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer; 101 import org.graalvm.compiler.lir.amd64.AMD64ClearRegisterOp; 102 import org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp; 103 import org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp; 104 import 
org.graalvm.compiler.lir.amd64.AMD64MulDivOp;
import org.graalvm.compiler.lir.amd64.AMD64ShiftOp;
import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp;
import org.graalvm.compiler.lir.amd64.AMD64Unary;
import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.CodeUtil;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.VMConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;
import jdk.vm.ci.code.TargetDescription;

/**
 * This class implements the AMD64 specific portion of the LIR generator.
 *
 * <p>Each {@code emit*} method allocates a new result {@link Variable} and appends the
 * appropriate AMD64 LIR instruction(s) to the current block via {@code getLIRGen().append(...)}.
 * Integer operations come in DWORD (32-bit) and QWORD (64-bit) flavors; floating-point
 * operations use the AVX three-operand forms when the CPU reports the AVX feature and the
 * SSE two-operand forms otherwise.
 */
public class AMD64ArithmeticLIRGenerator extends ArithmeticLIRGenerator implements AMD64ArithmeticLIRGeneratorTool {

    // Fixed RCX operand for variable shift amounts: the x86 shift/rotate "by CL"
    // instruction forms require the count in the CL register (see emitShift).
    private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD));

    /**
     * Emits the arithmetic negation of {@code inputVal}.
     *
     * Integer kinds use the NEG instruction. Floating-point kinds have no negate
     * instruction on x86, so the IEEE-754 sign bit is flipped by XORing with a
     * 16-byte-aligned sign-bit mask constant (AVX three-operand XOR when available,
     * SSE two-operand XOR otherwise).
     */
    @Override
    public Variable emitNegate(Value inputVal) {
        AllocatableValue input = getLIRGen().asAllocatable(inputVal);
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        TargetDescription target = getLIRGen().target();
        boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
        switch ((AMD64Kind) input.getPlatformKind()) {
            case DWORD:
                getLIRGen().append(new AMD64Unary.MOp(NEG, DWORD, result, input));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MOp(NEG, QWORD, result, input));
                break;
            case SINGLE:
                // Flip the float sign bit: xor with 0x80000000.
                if (isAvx) {
                    getLIRGen().append(new AMD64Binary.DataThreeOp(AVXOp.XOR, PS, result, input, JavaConstant.forFloat(Float.intBitsToFloat(0x80000000)), 16));
                } else {
                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PS, result, input, JavaConstant.forFloat(Float.intBitsToFloat(0x80000000)), 16));
                }
                break;
            case DOUBLE:
                // Flip the double sign bit: xor with 0x8000000000000000L.
                if (isAvx) {
                    getLIRGen().append(new AMD64Binary.DataThreeOp(AVXOp.XOR, PD, result, input, JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L)), 16));
                } else {
                    getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.XOR, PD, result, input, JavaConstant.forDouble(Double.longBitsToDouble(0x8000000000000000L)), 16));
                }
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits the bitwise complement (NOT instruction) of an integer {@code inputVal}.
     */
    @Override
    public Variable emitNot(Value inputVal) {
        AllocatableValue input = getLIRGen().asAllocatable(inputVal);
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case DWORD:
                getLIRGen().append(new AMD64Unary.MOp(NOT, DWORD, result, input));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MOp(NOT, QWORD, result, input));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits an integer binary arithmetic operation, using the immediate-operand form when
     * one operand is a JavaConstant (swapping operands for commutative ops so the constant
     * ends up on the right).
     *
     * @param setFlags true if a subsequent instruction consumes the condition flags this
     *            operation produces (restricts which instruction encodings may be chosen)
     */
    private Variable emitBinary(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, Value a, Value b, boolean setFlags) {
        if (isJavaConstant(b)) {
            return emitBinaryConst(resultKind, op, size, commutative, getLIRGen().asAllocatable(a), asConstantValue(b), setFlags);
        } else if (commutative && isJavaConstant(a)) {
            return emitBinaryConst(resultKind, op, size, commutative, getLIRGen().asAllocatable(b), asConstantValue(a), setFlags);
        } else {
            return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, getLIRGen().asAllocatable(a), getLIRGen().asAllocatable(b));
        }
    }

    /**
     * Emits a binary operation given an explicit two-operand (RM) opcode; constants are
     * materialized from the data section rather than encoded as immediates.
     */
    private Variable emitBinary(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, Value a, Value b) {
        if (isJavaConstant(b)) {
            return emitBinaryConst(resultKind, op, size, getLIRGen().asAllocatable(a), asJavaConstant(b));
        } else if (commutative && isJavaConstant(a)) {
            return emitBinaryConst(resultKind, op, size, getLIRGen().asAllocatable(b), asJavaConstant(a));
        } else {
            return emitBinaryVar(resultKind, op, size, commutative, getLIRGen().asAllocatable(a), getLIRGen().asAllocatable(b));
        }
    }

    /**
     * Emits a binary operation given an explicit three-operand (RRM, AVX-style) opcode;
     * mirrors the RM overload above.
     */
    private Variable emitBinary(LIRKind resultKind, AMD64RRMOp op, OperandSize size, boolean commutative, Value a, Value b) {
        if (isJavaConstant(b)) {
            return emitBinaryConst(resultKind, op, size, getLIRGen().asAllocatable(a), asJavaConstant(b));
        } else if (commutative && isJavaConstant(a)) {
            return emitBinaryConst(resultKind, op, size, getLIRGen().asAllocatable(b), asJavaConstant(a));
        } else {
            return emitBinaryVar(resultKind, op, size, commutative, getLIRGen().asAllocatable(a), getLIRGen().asAllocatable(b));
        }
    }

    /**
     * Emits an integer arithmetic operation whose right operand is a constant. If the
     * constant fits in 32 bits it is encoded as an immediate (with an INC/DEC shortcut
     * for +/-1 when flags are not needed); otherwise it is loaded and the register-register
     * form is used.
     */
    private Variable emitBinaryConst(LIRKind resultKind, AMD64BinaryArithmetic op, OperandSize size, boolean commutative, AllocatableValue a, ConstantValue b, boolean setFlags) {
        long value = b.getJavaConstant().asLong();
        if (NumUtil.isInt(value)) {
            Variable result = getLIRGen().newVariable(resultKind);
            int constant = (int) value;

            if (!setFlags) {
                // INC/DEC are shorter but update the flags differently than ADD/SUB,
                // so they are only substituted when the caller does not consume flags.
                AMD64MOp mop = getMOp(op, constant);
                if (mop != null) {
                    getLIRGen().append(new AMD64Unary.MOp(mop, size, result, a));
                    return result;
                }
            }

            getLIRGen().append(new AMD64Binary.ConstOp(op, size, result, a, constant));
            return result;
        } else {
            // Constant does not fit in a 32-bit immediate: fall back to the RM form.
            return emitBinaryVar(resultKind, op.getRMOpcode(size), size, commutative, a, getLIRGen().asAllocatable(b));
        }
    }

    /**
     * Maps ADD/SUB of the constants +1/-1 onto the unary INC/DEC instructions;
     * returns null when no such shortcut applies.
     */
    private static AMD64MOp getMOp(AMD64BinaryArithmetic op, int constant) {
        if (constant == 1) {
            if (op.equals(AMD64BinaryArithmetic.ADD)) {
                return AMD64MOp.INC;
            }
            if (op.equals(AMD64BinaryArithmetic.SUB)) {
                return AMD64MOp.DEC;
            }
        } else if (constant == -1) {
            if (op.equals(AMD64BinaryArithmetic.ADD)) {
                return AMD64MOp.DEC;
            }
            if (op.equals(AMD64BinaryArithmetic.SUB)) {
                return AMD64MOp.INC;
            }
        }
        return null;
    }

    // Two-operand (RM) op with a data-section constant as the second operand.
    private Variable emitBinaryConst(LIRKind resultKind, AMD64RMOp op, OperandSize size, AllocatableValue a, JavaConstant b) {
        Variable result = getLIRGen().newVariable(resultKind);
        getLIRGen().append(new AMD64Binary.DataTwoOp(op, size, result, a, b));
        return result;
    }

    // Three-operand (RRM) op with a data-section constant as the second operand.
    private Variable emitBinaryConst(LIRKind resultKind, AMD64RRMOp op, OperandSize size, AllocatableValue a, JavaConstant b) {
        Variable result = getLIRGen().newVariable(resultKind);
        getLIRGen().append(new AMD64Binary.DataThreeOp(op, size, result, a, b));
        return result;
    }

    /**
     * Emits a register-register two-operand op. Commutative ops use a dedicated LIR
     * instruction that lets the register allocator pick either input as the result.
     */
    private Variable emitBinaryVar(LIRKind resultKind, AMD64RMOp op, OperandSize size, boolean commutative, AllocatableValue a, AllocatableValue b) {
        Variable result = getLIRGen().newVariable(resultKind);
        if (commutative) {
            getLIRGen().append(new AMD64Binary.CommutativeTwoOp(op, size, result, a, b));
        } else {
            getLIRGen().append(new AMD64Binary.TwoOp(op, size, result, a, b));
        }
        return result;
    }

    /**
     * Emits a register-register three-operand (RRM) op; see the RM overload above.
     */
    private Variable emitBinaryVar(LIRKind resultKind, AMD64RRMOp op, OperandSize size, boolean commutative, AllocatableValue a, AllocatableValue b) {
        Variable result = getLIRGen().newVariable(resultKind);
        if (commutative) {
            getLIRGen().append(new AMD64Binary.CommutativeThreeOp(op, size, result, a, b));
        } else {
            getLIRGen().append(new AMD64Binary.ThreeOp(op, size, result, a, b));
        }
        return result;
    }

    @Override
    protected boolean isNumericInteger(PlatformKind kind) {
        return ((AMD64Kind) kind).isInteger();
    }

    /**
     * Emits an addition. Integer kinds use ADD (with immediate/INC folding via
     * emitBinary); floating-point kinds use the scalar ADD of the SSE or AVX unit.
     */
    @Override
    public Variable emitAdd(LIRKind resultKind, Value a, Value b, boolean setFlags) {
        TargetDescription target = getLIRGen().target();
        boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, ADD, DWORD, true, a, b, setFlags);
            case QWORD:
                return emitBinary(resultKind, ADD, QWORD, true, a, b, setFlags);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.ADD, SS, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.ADD, SS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.ADD, SD, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.ADD, SD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a subtraction (non-commutative, so operands are never swapped).
     */
    @Override
    public Variable emitSub(LIRKind resultKind, Value a, Value b, boolean setFlags) {
        TargetDescription target = getLIRGen().target();
        boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, SUB, DWORD, false, a, b, setFlags);
            case QWORD:
                return emitBinary(resultKind, SUB, QWORD, false, a, b, setFlags);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.SUB, SS, false, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.SUB, SS, false, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.SUB, SD, false, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.SUB, SD, false, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a signed multiply by a constant. A 32-bit constant is encoded as an IMUL
     * immediate (sign-extended byte form when it fits in 8 bits); larger constants fall
     * back to the register-register IMUL.
     */
    private Variable emitIMULConst(OperandSize size, AllocatableValue a, ConstantValue b) {
        long value = b.getJavaConstant().asLong();
        if (NumUtil.isInt(value)) {
            int imm = (int) value;
            AMD64RMIOp op;
            if (NumUtil.isByte(imm)) {
                op = AMD64RMIOp.IMUL_SX;
            } else {
                op = AMD64RMIOp.IMUL;
            }

            Variable ret = getLIRGen().newVariable(LIRKind.combine(a, b));
            getLIRGen().append(new AMD64Binary.RMIOp(op, size, ret, a, imm));
            return ret;
        } else {
            return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, a, getLIRGen().asAllocatable(b));
        }
    }

    // Signed integer multiply; IMUL is commutative, so a constant on either side is
    // routed to the immediate form.
    private Variable emitIMUL(OperandSize size, Value a, Value b) {
        if (isJavaConstant(b)) {
            return emitIMULConst(size, getLIRGen().asAllocatable(a), asConstantValue(b));
        } else if (isJavaConstant(a)) {
            return emitIMULConst(size, getLIRGen().asAllocatable(b), asConstantValue(a));
        } else {
            return emitBinaryVar(LIRKind.combine(a, b), AMD64RMOp.IMUL, size, true, getLIRGen().asAllocatable(a), getLIRGen().asAllocatable(b));
        }
    }

    /**
     * Emits a multiplication. Integer kinds use IMUL; floating-point kinds use the
     * scalar MUL of the SSE or AVX unit.
     */
    @Override
    public Variable emitMul(Value a, Value b, boolean setFlags) {
        LIRKind resultKind = LIRKind.combine(a, b);
        TargetDescription target = getLIRGen().target();
        boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitIMUL(DWORD, a, b);
            case QWORD:
                return emitIMUL(QWORD, a, b);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.MUL, SS, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.MUL, SS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.MUL, SD, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.MUL, SD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    // Copies v into the fixed register reg; used for instructions with implicit
    // register operands (e.g. one-operand MUL/DIV which use RAX/RDX).
    private RegisterValue moveToReg(Register reg, Value v) {
        RegisterValue ret = reg.asValue(v.getValueKind());
        getLIRGen().emitMove(ret, v);
        return ret;
    }

    // One-operand MUL/IMUL: a is forced into RAX, and the upper half of the
    // double-width product is read from the op's high-result register.
    private Value emitMulHigh(AMD64MOp opcode, OperandSize size, Value a, Value b) {
        AMD64MulDivOp mulHigh = getLIRGen().append(new AMD64MulDivOp(opcode, size, LIRKind.combine(a, b), moveToReg(AMD64.rax, a), getLIRGen().asAllocatable(b)));
        return getLIRGen().emitMove(mulHigh.getHighResult());
    }

    /**
     * Emits the high half of the signed double-width product of {@code a * b}.
     */
    @Override
    public Value emitMulHigh(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitMulHigh(AMD64MOp.IMUL, DWORD, a, b);
            case QWORD:
                return emitMulHigh(AMD64MOp.IMUL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits the high half of the unsigned double-width product of {@code a * b}.
     */
    @Override
    public Value emitUMulHigh(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitMulHigh(AMD64MOp.MUL, DWORD, a, b);
            case QWORD:
                return emitMulHigh(AMD64MOp.MUL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a two-operand op whose second operand is read directly from memory
     * (possibly faulting; {@code state} describes the deopt state for an implicit
     * null check).
     */
    public Value emitBinaryMemory(AMD64RMOp op, OperandSize size, AllocatableValue a, AMD64AddressValue location, LIRFrameState state) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(a));
        getLIRGen().append(new AMD64Binary.MemoryTwoOp(op, size, result, a, location, state));
        return result;
    }

    /**
     * Three-operand (RRM) variant of {@link #emitBinaryMemory}.
     */
    public Value emitBinaryMemory(AMD64RRMOp op, OperandSize size, AllocatableValue a, AMD64AddressValue location, LIRFrameState state) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(a));
        getLIRGen().append(new AMD64Binary.MemoryThreeOp(op, size, result, a, location, state));
        return result;
    }

    /**
     * Emits a converting load: op reads from {@code address} and produces a value of
     * {@code kind}.
     */
    protected Value emitConvertMemoryOp(PlatformKind kind, AMD64RMOp op, OperandSize size, AMD64AddressValue address, LIRFrameState state) {
        Variable result = getLIRGen().newVariable(LIRKind.value(kind));
        getLIRGen().append(new AMD64Unary.MemoryOp(op, size, result, address, state));
        return result;
    }

    protected Value emitZeroExtendMemory(AMD64Kind memoryKind, int resultBits, AMD64AddressValue address, LIRFrameState state) {
        // Issue a zero extending load of the proper bit size and set the result to
        // the proper kind.
        Variable result = getLIRGen().newVariable(LIRKind.value(resultBits == 32 ? AMD64Kind.DWORD : AMD64Kind.QWORD));
        switch (memoryKind) {
            case BYTE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVZXB, DWORD, result, address, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVZX, DWORD, result, address, state));
                break;
            case DWORD:
                // A plain 32-bit MOV implicitly zeroes the upper 32 bits.
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, address, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, address, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits a signed divide: a is moved to RAX, sign-extended into the high register by
     * AMD64SignExtendOp, then IDIV divides the double-width value by b. The returned op
     * exposes both quotient and remainder.
     *
     * @param state deopt state for the implicit ArithmeticException on division by zero
     */
    private AMD64MulDivOp emitIDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
        LIRKind kind = LIRKind.combine(a, b);

        AMD64SignExtendOp sx = getLIRGen().append(new AMD64SignExtendOp(size, kind, moveToReg(AMD64.rax, a)));
        return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.IDIV, size, kind, sx.getHighResult(), sx.getLowResult(), getLIRGen().asAllocatable(b), state));
    }

    /**
     * Emits an unsigned divide: a is moved to RAX, RDX is cleared (unsigned high half is
     * zero), then DIV divides RDX:RAX by b.
     */
    private AMD64MulDivOp emitDIV(OperandSize size, Value a, Value b, LIRFrameState state) {
        LIRKind kind = LIRKind.combine(a, b);

        RegisterValue rax = moveToReg(AMD64.rax, a);
        RegisterValue rdx = AMD64.rdx.asValue(kind);
        getLIRGen().append(new AMD64ClearRegisterOp(size, rdx));
        return getLIRGen().append(new AMD64MulDivOp(AMD64MOp.DIV, size, kind, rdx, rax, getLIRGen().asAllocatable(b), state));
    }

    /**
     * Emits a combined signed division, returning {quotient, remainder} from a single
     * IDIV instruction.
     */
    public Value[] emitSignedDivRem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitIDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitIDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
    }

    /**
     * Emits a combined unsigned division, returning {quotient, remainder} from a single
     * DIV instruction.
     */
    public Value[] emitUnsignedDivRem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return new Value[]{getLIRGen().emitMove(op.getQuotient()), getLIRGen().emitMove(op.getRemainder())};
    }

    /**
     * Emits a division. Integer kinds use IDIV and keep the quotient; floating-point
     * kinds use the scalar DIV of the SSE or AVX unit.
     */
    @Override
    public Value emitDiv(Value a, Value b, LIRFrameState state) {
        TargetDescription target = getLIRGen().target();
        boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
        LIRKind resultKind = LIRKind.combine(a, b);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
                return getLIRGen().emitMove(op.getQuotient());
            case QWORD:
                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
                return getLIRGen().emitMove(lop.getQuotient());
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.DIV, SS, false, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.DIV, SS, false, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.DIV, SD, false, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.DIV, SD, false, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a remainder. Integer kinds use IDIV and keep the remainder; floating-point
     * kinds use the dedicated FREM/DREM LIR op.
     */
    @Override
    public Value emitRem(Value a, Value b, LIRFrameState state) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                AMD64MulDivOp op = emitIDIV(DWORD, a, b, state);
                return getLIRGen().emitMove(op.getRemainder());
            case QWORD:
                AMD64MulDivOp lop = emitIDIV(QWORD, a, b, state);
                return getLIRGen().emitMove(lop.getRemainder());
            case SINGLE: {
                Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
                getLIRGen().append(new FPDivRemOp(FREM, result, getLIRGen().load(a), getLIRGen().load(b)));
                return result;
            }
            case DOUBLE: {
                Variable result = getLIRGen().newVariable(LIRKind.combine(a, b));
                getLIRGen().append(new FPDivRemOp(DREM, result, getLIRGen().load(a), getLIRGen().load(b)));
                return result;
            }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits an unsigned integer division, keeping only the quotient.
     */
    @Override
    public Variable emitUDiv(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return getLIRGen().emitMove(op.getQuotient());
    }

    /**
     * Emits an unsigned integer division, keeping only the remainder.
     */
    @Override
    public Variable emitURem(Value a, Value b, LIRFrameState state) {
        AMD64MulDivOp op;
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                op = emitDIV(DWORD, a, b, state);
                break;
            case QWORD:
                op = emitDIV(QWORD, a, b, state);
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return getLIRGen().emitMove(op.getRemainder());
    }

    /**
     * Emits a bitwise AND. Floating-point kinds use the packed AND ops (used e.g. for
     * bit-mask operations on FP values).
     */
    @Override
    public Variable emitAnd(Value a, Value b) {
        LIRKind resultKind = LIRKind.combine(a, b);
        TargetDescription target = getLIRGen().target();
        boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, AND, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, AND, QWORD, true, a, b, false);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.AND, PS, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.AND, PS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.AND, PD, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.AND, PD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a bitwise OR; see {@link #emitAnd} for the kind dispatch.
     */
    @Override
    public Variable emitOr(Value a, Value b) {
        LIRKind resultKind = LIRKind.combine(a, b);
        TargetDescription target = getLIRGen().target();
        boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, OR, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, OR, QWORD, true, a, b, false);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.OR, PS, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.OR, PS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.OR, PD, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.OR, PD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a bitwise XOR; see {@link #emitAnd} for the kind dispatch.
     */
    @Override
    public Variable emitXor(Value a, Value b) {
        LIRKind resultKind = LIRKind.combine(a, b);
        TargetDescription target = getLIRGen().target();
        boolean isAvx = ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitBinary(resultKind, XOR, DWORD, true, a, b, false);
            case QWORD:
                return emitBinary(resultKind, XOR, QWORD, true, a, b, false);
            case SINGLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.XOR, PS, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.XOR, PS, true, a, b);
                }
            case DOUBLE:
                if (isAvx) {
                    return emitBinary(resultKind, AVXOp.XOR, PD, true, a, b);
                } else {
                    return emitBinary(resultKind, SSEOp.XOR, PD, true, a, b);
                }
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a shift/rotate of a by b. Constant counts use the shift-by-1 or
     * shift-by-immediate encodings; variable counts are moved into RCX, as the
     * shift-by-register forms read the count from CL.
     */
    private Variable emitShift(AMD64Shift op, OperandSize size, Value a, Value b) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(a, b).changeType(a.getPlatformKind()));
        AllocatableValue input = getLIRGen().asAllocatable(a);
        if (isJavaConstant(b)) {
            JavaConstant c = asJavaConstant(b);
            if (c.asLong() == 1) {
                getLIRGen().append(new AMD64Unary.MOp(op.m1Op, size, result, input));
            } else {
                /*
                 * c is implicitly masked to 5 or 6 bits by the CPU, so casting it to (int) is
                 * always correct, even without the NumUtil.is32bit() test.
                 */
                getLIRGen().append(new AMD64Binary.ConstOp(op.miOp, size, result, input, (int) c.asLong()));
            }
        } else {
            getLIRGen().emitMove(RCX_I, b);
            getLIRGen().append(new AMD64ShiftOp(op.mcOp, size, result, input, RCX_I));
        }
        return result;
    }

    /**
     * Emits a left shift ({@code <<}, SHL).
     */
    @Override
    public Variable emitShl(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SHL, DWORD, a, b);
            case QWORD:
                return emitShift(SHL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits an arithmetic (sign-preserving) right shift ({@code >>}, SAR).
     */
    @Override
    public Variable emitShr(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SAR, DWORD, a, b);
            case QWORD:
                return emitShift(SAR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a logical (zero-filling) right shift ({@code >>>}, SHR).
     */
    @Override
    public Variable emitUShr(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(SHR, DWORD, a, b);
            case QWORD:
                return emitShift(SHR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a rotate left (ROL).
     */
    public Variable emitRol(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(ROL, DWORD, a, b);
            case QWORD:
                return emitShift(ROL, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a rotate right (ROR).
     */
    public Variable emitRor(Value a, Value b) {
        switch ((AMD64Kind) a.getPlatformKind()) {
            case DWORD:
                return emitShift(ROR, DWORD, a, b);
            case QWORD:
                return emitShift(ROR, QWORD, a, b);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Emits a unary register-or-memory-source (RM) conversion op into a fresh variable
     * of the given result kind.
     */
    private AllocatableValue emitConvertOp(LIRKind kind, AMD64RMOp op, OperandSize size, Value input) {
        Variable result = getLIRGen().newVariable(kind);
        getLIRGen().append(new AMD64Unary.RMOp(op, size, result, getLIRGen().asAllocatable(input)));
        return result;
    }

    /**
     * Emits a unary memory-or-register-destination (MR) conversion op into a fresh
     * variable of the given result kind.
     */
    private AllocatableValue emitConvertOp(LIRKind kind, AMD64MROp op, OperandSize size, Value input) {
        Variable result = getLIRGen().newVariable(kind);
        getLIRGen().append(new AMD64Unary.MROp(op, size, result, getLIRGen().asAllocatable(input)));
        return result;
    }

    /**
     * Reinterprets the bits of {@code inputVal} as the platform kind of {@code to}
     * without any value conversion. If source and target kinds are equal the input is
     * returned unchanged.
     */
    @Override
    public Value emitReinterpret(LIRKind to, Value inputVal) {
        ValueKind<?> from = inputVal.getValueKind();
        if (to.equals(from)) {
            return inputVal;
        }

        AllocatableValue input = getLIRGen().asAllocatable(inputVal);
        /*
         * Conversions between integer to floating point types require moves between CPU and FPU
         * registers.
         */
        AMD64Kind fromKind = (AMD64Kind) from.getPlatformKind();
        // Only the four integer<->float pairs below are supported; any other combination
        // (including same-width pairs not listed) falls through to shouldNotReachHere().
        switch ((AMD64Kind) to.getPlatformKind()) {
            case DWORD:
                switch (fromKind) {
                    case SINGLE:
                        return emitConvertOp(to, AMD64MROp.MOVD, DWORD, input);
                }
                break;
            case QWORD:
                switch (fromKind) {
                    case DOUBLE:
                        return emitConvertOp(to, AMD64MROp.MOVQ, QWORD, input);
                }
                break;
            case SINGLE:
                switch (fromKind) {
                    case DWORD:
                        return emitConvertOp(to, AMD64RMOp.MOVD, DWORD, input);
                }
                break;
            case DOUBLE:
                switch (fromKind) {
                    case QWORD:
                        return emitConvertOp(to, AMD64RMOp.MOVQ, QWORD, input);
                }
                break;
        }
        throw GraalError.shouldNotReachHere();
    }

    /**
     * Emits the SSE scalar-conversion instruction corresponding to {@code op}.
     * Float-to-integer conversions use the truncating (CVTTxx2SI) forms.
     */
    @Override
    public Value emitFloatConvert(FloatConvert op, Value input) {
        switch (op) {
            case D2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSD2SS, SD, input);
            case D2I:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSD2SI, DWORD, input);
            case D2L:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSD2SI, QWORD, input);
            case F2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSS2SD, SS, input);
            case F2I:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DWORD), SSEOp.CVTTSS2SI, DWORD, input);
            case F2L:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.QWORD), SSEOp.CVTTSS2SI, QWORD, input);
            case I2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, DWORD, input);
            case I2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, DWORD, input);
            case L2D:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.DOUBLE), SSEOp.CVTSI2SD, QWORD, input);
            case L2F:
                return emitConvertOp(LIRKind.combine(input).changeType(AMD64Kind.SINGLE), SSEOp.CVTSI2SS, QWORD, input);
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Narrows a QWORD value to at most 32 bits by moving it into a DWORD variable;
     * anything already at DWORD width (or smaller) is returned unchanged.
     */
    @Override
    public Value emitNarrow(Value inputVal, int bits) {
        if (inputVal.getPlatformKind() == AMD64Kind.QWORD && bits <= 32) {
            // TODO make it possible to reinterpret Long as Int in LIR without move
            return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), AMD64RMOp.MOV, DWORD, inputVal);
        } else {
            return inputVal;
        }
    }

    /**
     * Sign-extends {@code inputVal} from {@code fromBits} to {@code toBits} using
     * MOVSXB/MOVSX/MOVSXD. Only the standard widths 8, 16 and 32 are supported as
     * sources; other widths throw {@link GraalError#unimplemented}.
     */
    @Override
    public Value emitSignExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (toBits > 32) {
            // sign extend to 64 bits
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXB, QWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSX, QWORD, inputVal);
                case 32:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.QWORD), MOVSXD, QWORD, inputVal);
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        } else {
            // sign extend to 32 bits (smaller values are internally represented as 32 bit values)
            switch (fromBits) {
                case 8:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSXB, DWORD, inputVal);
                case 16:
                    return emitConvertOp(LIRKind.combine(inputVal).changeType(AMD64Kind.DWORD), MOVSX, DWORD, inputVal);
                case 32:
                    return inputVal;
                default:
                    throw GraalError.unimplemented("unsupported sign extension (" + fromBits + " bit -> " + toBits + " bit)");
            }
        }
    }

    /**
     * Zero-extends {@code inputVal} from {@code fromBits} to {@code toBits}.
     * Standard widths (8/16/32) use MOVZXB/MOVZX/MOV; odd bit counts fall back to an
     * explicit AND with the corresponding bit mask.
     */
    @Override
    public Value emitZeroExtend(Value inputVal, int fromBits, int toBits) {
        assert fromBits <= toBits && toBits <= 64;
        if (fromBits == toBits) {
            return inputVal;
        } else if (fromBits > 32) {
            // Source wider than 32 bits: no MOVZX form exists, so mask explicitly with
            // a QWORD AND against the fromBits-wide mask.
            assert inputVal.getPlatformKind() == AMD64Kind.QWORD;
            Variable result = getLIRGen().newVariable(LIRKind.combine(inputVal));
            long mask = CodeUtil.mask(fromBits);
            getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(QWORD), QWORD, result, getLIRGen().asAllocatable(inputVal), JavaConstant.forLong(mask)));
            return result;
        } else {
            LIRKind resultKind = LIRKind.combine(inputVal);
            if (toBits > 32) {
                resultKind = resultKind.changeType(AMD64Kind.QWORD);
            } else {
                resultKind = resultKind.changeType(AMD64Kind.DWORD);
            }

            /*
             * Always emit DWORD operations, even if the resultKind is Long. On AMD64, all DWORD
             * operations implicitly set the upper half of the register to 0, which is what we want
             * anyway. Compared to the QWORD oparations, the encoding of the DWORD operations is
             * sometimes one byte shorter.
             */
            switch (fromBits) {
                case 8:
                    return emitConvertOp(resultKind, MOVZXB, DWORD, inputVal);
                case 16:
                    return emitConvertOp(resultKind, MOVZX, DWORD, inputVal);
                case 32:
                    return emitConvertOp(resultKind, MOV, DWORD, inputVal);
            }

            // odd bit count, fall back on manual masking
            Variable result = getLIRGen().newVariable(resultKind);
            JavaConstant mask;
            if (toBits > 32) {
                mask = JavaConstant.forLong(CodeUtil.mask(fromBits));
            } else {
                mask = JavaConstant.forInt((int) CodeUtil.mask(fromBits));
            }
            getLIRGen().append(new AMD64Binary.DataTwoOp(AND.getRMOpcode(DWORD), DWORD, result, getLIRGen().asAllocatable(inputVal), mask));
            return result;
        }
    }

    /**
     * Emits POPCNT for an integer value; the result is always a DWORD variable.
     */
    @Override
    public Variable emitBitCount(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(POPCNT, QWORD, result, getLIRGen().asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(POPCNT, DWORD, result, getLIRGen().asAllocatable(value)));
        }
        return result;
    }

    /**
     * Emits BSF (find lowest set bit) into a DWORD result.
     *
     * NOTE(review): unlike emitBitScanReverse below, this always uses the QWORD form
     * regardless of the input's platform kind, and it has no integer-kind assert —
     * presumably safe because DWORD values have zeroed upper register halves, but
     * confirm the asymmetry is intentional.
     */
    @Override
    public Variable emitBitScanForward(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        getLIRGen().append(new AMD64Unary.RMOp(BSF, QWORD, result, getLIRGen().asAllocatable(value)));
        return result;
    }

    /**
     * Emits BSR (find highest set bit) into a DWORD result, sized to match the input.
     */
    @Override
    public Variable emitBitScanReverse(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(BSR, QWORD, result, getLIRGen().asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(BSR, DWORD, result, getLIRGen().asAllocatable(value)));
        }
        return result;
    }

    /**
     * Emits LZCNT (count leading zeros) into a DWORD result, sized to match the input.
     */
    @Override
    public Value emitCountLeadingZeros(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(LZCNT, QWORD, result, getLIRGen().asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(LZCNT, DWORD, result, getLIRGen().asAllocatable(value)));
        }
        return result;
    }

    /**
     * Emits TZCNT (count trailing zeros) into a DWORD result, sized to match the input.
     */
    @Override
    public Value emitCountTrailingZeros(Value value) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value).changeType(AMD64Kind.DWORD));
        assert ((AMD64Kind) value.getPlatformKind()).isInteger();
        if (value.getPlatformKind() == AMD64Kind.QWORD) {
            getLIRGen().append(new AMD64Unary.RMOp(TZCNT, QWORD, result, getLIRGen().asAllocatable(value)));
        } else {
            getLIRGen().append(new AMD64Unary.RMOp(TZCNT, DWORD, result, getLIRGen().asAllocatable(value)));
        }
        return result;
    }

    /**
     * Computes the absolute value of a floating-point input by ANDing away the sign
     * bit (0x7FFFFFFF for float, 0x7FF...F for double). The trailing 16 is the data
     * section alignment for the embedded constant.
     */
    @Override
    public Value emitMathAbs(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case SINGLE:
                getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PS, result, getLIRGen().asAllocatable(input), JavaConstant.forFloat(Float.intBitsToFloat(0x7FFFFFFF)), 16));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64Binary.DataTwoOp(SSEOp.AND, PD, result, getLIRGen().asAllocatable(input), JavaConstant.forDouble(Double.longBitsToDouble(0x7FFFFFFFFFFFFFFFL)), 16));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits the scalar SSE square-root instruction (SQRTSS/SQRTSD via SSEOp.SQRT).
     */
    @Override
    public Value emitMathSqrt(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        switch ((AMD64Kind) input.getPlatformKind()) {
            case SINGLE:
                getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SS, result, getLIRGen().asAllocatable(input)));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64Unary.RMOp(SSEOp.SQRT, SD, result, getLIRGen().asAllocatable(input)));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Emits the math-intrinsic stub for log/log10. All of the unary math intrinsics
     * below allocate a QWORD spill slot used as scratch space by the generated stub.
     */
    @Override
    public Value emitMathLog(Value input, boolean base10) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), base10 ? LOG10 : LOG, result, getLIRGen().asAllocatable(input), stackSlot));
        return result;
    }

    /** Emits the cosine math-intrinsic stub. */
    @Override
    public Value emitMathCos(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), COS, result, getLIRGen().asAllocatable(input), stackSlot));
        return result;
    }

    /** Emits the sine math-intrinsic stub. */
    @Override
    public Value emitMathSin(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), SIN, result, getLIRGen().asAllocatable(input), stackSlot));
        return result;
    }

    /** Emits the tangent math-intrinsic stub. */
    @Override
    public Value emitMathTan(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), TAN, result, getLIRGen().asAllocatable(input), stackSlot));
        return result;
    }

    /** Emits the exponential math-intrinsic stub. */
    @Override
    public Value emitMathExp(Value input) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input));
        AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
        getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), EXP, result, getLIRGen().asAllocatable(input), stackSlot));
        return result;
    }

    /**
     * Emits the binary pow math-intrinsic stub; the result kind follows the first
     * input (no spill slot is needed for the binary op).
     */
    @Override
    public Value emitMathPow(Value input1, Value input2) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(input1));
        getLIRGen().append(new AMD64MathIntrinsicBinaryOp(getAMD64LIRGen(), POW, result, getLIRGen().asAllocatable(input1), getLIRGen().asAllocatable(input2)));
        return result;
    }

    /** Convenience downcast of the shared LIR generator to its AMD64 subtype. */
    protected AMD64LIRGenerator getAMD64LIRGen() {
        return (AMD64LIRGenerator) getLIRGen();
    }

    /**
     * Emits a kind-appropriate load from {@code address}. Sub-word integer kinds are
     * sign-extended to DWORD on load (MOVSXB/MOVSX); floats use MOVSS/MOVSD.
     *
     * @param state frame state for an implicit null-check / deoptimization at the load
     */
    @Override
    public Variable emitLoad(LIRKind kind, Value address, LIRFrameState state) {
        AMD64AddressValue loadAddress = getAMD64LIRGen().asAddressValue(address);
        Variable result = getLIRGen().newVariable(getLIRGen().toRegisterKind(kind));
        switch ((AMD64Kind) kind.getPlatformKind()) {
            case BYTE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSXB, DWORD, result, loadAddress, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSX, DWORD, result, loadAddress, state));
                break;
            case DWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, DWORD, result, loadAddress, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOV, QWORD, result, loadAddress, state));
                break;
            case SINGLE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSS, SS, result, loadAddress, state));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64Unary.MemoryOp(MOVSD, SD, result, loadAddress, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
        return result;
    }

    /**
     * Tries to store a constant directly to memory with a MOV-immediate. Handles null
     * constants, patchable 32-bit VM constants, and primitive constants whose bit
     * pattern fits in a signed 32-bit immediate; anything else falls through to the
     * load-then-store fallback at the bottom.
     */
    protected void emitStoreConst(AMD64Kind kind, AMD64AddressValue address, ConstantValue value, LIRFrameState state) {
        Constant c = value.getConstant();
        if (JavaConstant.isNull(c)) {
            assert kind == AMD64Kind.DWORD || kind == AMD64Kind.QWORD;
            OperandSize size = kind == AMD64Kind.DWORD ? DWORD : QWORD;
            getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(AMD64MIOp.MOV, size, address, 0, state));
            return;
        } else if (c instanceof VMConstant) {
            // only 32-bit constants can be patched
            if (kind == AMD64Kind.DWORD) {
                if (getLIRGen().target().inlineObjects || !(c instanceof JavaConstant)) {
                    // if c is a JavaConstant, it's an oop, otherwise it's a metaspace constant
                    assert !(c instanceof JavaConstant) || ((JavaConstant) c).getJavaKind() == JavaKind.Object;
                    getLIRGen().append(new AMD64BinaryConsumer.MemoryVMConstOp(AMD64MIOp.MOV, address, (VMConstant) c, state));
                    return;
                }
            }
        } else {
            JavaConstant jc = (JavaConstant) c;
            assert jc.getJavaKind().isPrimitive();

            AMD64MIOp op = AMD64MIOp.MOV;
            OperandSize size;
            long imm;

            switch (kind) {
                case BYTE:
                    // Byte stores need the dedicated MOVB immediate form.
                    op = AMD64MIOp.MOVB;
                    size = BYTE;
                    imm = jc.asInt();
                    break;
                case WORD:
                    size = WORD;
                    imm = jc.asInt();
                    break;
                case DWORD:
                    size = DWORD;
                    imm = jc.asInt();
                    break;
                case QWORD:
                    size = QWORD;
                    imm = jc.asLong();
                    break;
                case SINGLE:
                    // Floats are stored as their raw bit pattern via an integer MOV.
                    size = DWORD;
                    imm = Float.floatToRawIntBits(jc.asFloat());
                    break;
                case DOUBLE:
                    size = QWORD;
                    imm = Double.doubleToRawLongBits(jc.asDouble());
                    break;
                default:
                    throw GraalError.shouldNotReachHere("unexpected kind " + kind);
            }

            // MOV-to-memory only takes a sign-extended 32-bit immediate.
            if (NumUtil.isInt(imm)) {
                getLIRGen().append(new AMD64BinaryConsumer.MemoryConstOp(op, size, address, (int) imm, state));
                return;
            }
        }

        // fallback: load, then store
        emitStore(kind, address, getLIRGen().asAllocatable(value), state);
    }

    /**
     * Stores an allocatable (register) value to memory with the kind-appropriate
     * MOV/MOVB/MOVSS/MOVSD form.
     */
    protected void emitStore(AMD64Kind kind, AMD64AddressValue address, AllocatableValue value, LIRFrameState state) {
        switch (kind) {
            case BYTE:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVB, BYTE, address, value, state));
                break;
            case WORD:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, WORD, address, value, state));
                break;
            case DWORD:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, DWORD, address, value, state));
                break;
            case QWORD:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOV, QWORD, address, value, state));
                break;
            case SINGLE:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSS, SS, address, value, state));
                break;
            case DOUBLE:
                getLIRGen().append(new AMD64BinaryConsumer.MemoryMROp(AMD64MROp.MOVSD, SD, address, value, state));
                break;
            default:
                throw GraalError.shouldNotReachHere();
        }
    }

    /**
     * Public store entry point: dispatches to the constant-store path when the input
     * is a constant, otherwise to the register-store path.
     */
    @Override
    public void emitStore(ValueKind<?> lirKind, Value address, Value input, LIRFrameState state) {
        AMD64AddressValue storeAddress = getAMD64LIRGen().asAddressValue(address);
        AMD64Kind kind = (AMD64Kind) lirKind.getPlatformKind();
        if (isConstantValue(input)) {
            emitStoreConst(kind, storeAddress, asConstantValue(input), state);
        } else {
            emitStore(kind, storeAddress, getLIRGen().asAllocatable(input), state);
        }
    }

    /**
     * Emits a flag-setting compare of {@code left} against {@code right}. Floats use
     * UCOMIS and return immediately; integer kinds pick an operand size, then try
     * progressively cheaper encodings: TEST against self for null/zero constants,
     * patched or data-section compares for VM constants, CMP-immediate for 32-bit
     * Java constants, and finally a plain register/memory CMP.
     */
    @Override
    public void emitCompareOp(AMD64Kind cmpKind, Variable left, Value right) {
        OperandSize size;
        switch (cmpKind) {
            case BYTE:
                size = BYTE;
                break;
            case WORD:
                size = WORD;
                break;
            case DWORD:
                size = DWORD;
                break;
            case QWORD:
                size = QWORD;
                break;
            case SINGLE:
                getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PS, left, getLIRGen().asAllocatable(right)));
                return;
            case DOUBLE:
                getLIRGen().append(new AMD64BinaryConsumer.Op(SSEOp.UCOMIS, PD, left, getLIRGen().asAllocatable(right)));
                return;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
        }

        if (isConstantValue(right)) {
            Constant c = LIRValueUtil.asConstant(right);
            if (JavaConstant.isNull(c)) {
                // Compare against null/zero: TEST left,left sets the same flags as CMP with 0.
                getLIRGen().append(new AMD64BinaryConsumer.Op(TEST, size, left, left));
                return;
            } else if (c instanceof VMConstant) {
                VMConstant vc = (VMConstant) c;
                if (size == DWORD && !GeneratePIC.getValue()) {
                    // 32-bit VM constants can be patched directly into the instruction.
                    getLIRGen().append(new AMD64BinaryConsumer.VMConstOp(CMP.getMIOpcode(DWORD, false), left, vc));
                } else {
                    // Otherwise the constant is materialized in the data section.
                    getLIRGen().append(new AMD64BinaryConsumer.DataOp(CMP.getRMOpcode(size), size, left, vc));
                }
                return;
            } else if (c instanceof JavaConstant) {
                JavaConstant jc = (JavaConstant) c;
                if (jc.isDefaultForKind()) {
                    AMD64RMOp op = size == BYTE ? TESTB : TEST;
                    getLIRGen().append(new AMD64BinaryConsumer.Op(op, size, left, left));
                    return;
                } else if (NumUtil.is32bit(jc.asLong())) {
                    getLIRGen().append(new AMD64BinaryConsumer.ConstOp(CMP, size, left, (int) jc.asLong()));
                    return;
                }
            }
        }

        // fallback: load, then compare
        getLIRGen().append(new AMD64BinaryConsumer.Op(CMP.getRMOpcode(size), size, left, getLIRGen().asAllocatable(right)));
    }

    /**
     * Emits ROUNDSS/ROUNDSD with the rounding mode's immediate encoding.
     *
     * NOTE(review): OperandSize.PD is passed for both the single- and double-precision
     * forms — presumably an encoding detail of the ROUND opcodes; confirm against the
     * assembler definitions.
     */
    @Override
    public Value emitRound(Value value, RoundingMode mode) {
        Variable result = getLIRGen().newVariable(LIRKind.combine(value));
        assert ((AMD64Kind) value.getPlatformKind()).isXMM();
        if (value.getPlatformKind() == AMD64Kind.SINGLE) {
            getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSS, OperandSize.PD, result, getLIRGen().asAllocatable(value), mode.encoding));
        } else {
            getLIRGen().append(new AMD64Binary.RMIOp(AMD64RMIOp.ROUNDSD, OperandSize.PD, result, getLIRGen().asAllocatable(value), mode.encoding));
        }
        return result;
    }
}