/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */


package org.graalvm.compiler.lir.amd64;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.XOR;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;

import java.util.Objects;

import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.asm.amd64.AMD64Assembler;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.asm.amd64.AVXKind;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import org.graalvm.compiler.lir.gen.LIRGeneratorTool;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.Value;

/**
 * Emits code which compares two arrays of the same length. If the CPU supports any vector
 * instructions, specialized code is emitted to leverage these instructions.
 *
 * This op can also compare arrays of different integer types (e.g. {@code byte[]} and
 * {@code char[]}) with on-the-fly sign- or zero-extension. If one of the given arrays is a
 * {@code char[]} array, the smaller elements are zero-extended, otherwise they are sign-extended.
 */
@Opcode("ARRAY_EQUALS")
public final class AMD64ArrayEqualsOp extends AMD64LIRInstruction {
    public static final LIRInstructionClass<AMD64ArrayEqualsOp> TYPE = LIRInstructionClass.create(AMD64ArrayEqualsOp.class);

    private final JavaKind kind1;
    private final JavaKind kind2;
    private final int arrayBaseOffset1;
    private final int arrayBaseOffset2;
    private final Scale arrayIndexScale1;
    private final Scale arrayIndexScale2;
    private final AVXKind.AVXSize vectorSize;
    private final boolean signExtend;

    @Def({REG}) private Value resultValue;
    @Alive({REG}) private Value array1Value;
    @Alive({REG}) private Value array2Value;
    @Alive({REG, CONST}) private Value lengthValue;
    @Temp({REG, ILLEGAL}) private Value temp1;
    @Temp({REG, ILLEGAL}) private Value temp2;
    @Temp({REG}) private Value temp3;
    @Temp({REG, ILLEGAL}) private Value temp4;

    @Temp({REG, ILLEGAL}) private Value temp5;
    @Temp({REG, ILLEGAL}) private Value tempXMM;

    @Temp({REG, ILLEGAL}) private Value vectorTemp1;
    @Temp({REG, ILLEGAL}) private Value vectorTemp2;
    @Temp({REG, ILLEGAL}) private Value vectorTemp3;
    @Temp({REG, ILLEGAL}) private Value vectorTemp4;

    public AMD64ArrayEqualsOp(LIRGeneratorTool tool, JavaKind kind1, JavaKind kind2, Value result, Value array1, Value array2, Value length,
                    boolean directPointers, int maxVectorSize) {
        super(TYPE);
        this.kind1 = kind1;
        this.kind2 = kind2;
        this.signExtend = kind1 != JavaKind.Char && kind2 != JavaKind.Char;

        assert kind1.isNumericInteger() && kind2.isNumericInteger() || kind1 == kind2;

        this.arrayBaseOffset1 = directPointers ? 0 : tool.getProviders().getMetaAccess().getArrayBaseOffset(kind1);
        this.arrayBaseOffset2 = directPointers ? 0 : tool.getProviders().getMetaAccess().getArrayBaseOffset(kind2);
        this.arrayIndexScale1 = Objects.requireNonNull(Scale.fromInt(tool.getProviders().getMetaAccess().getArrayIndexScale(kind1)));
        this.arrayIndexScale2 = Objects.requireNonNull(Scale.fromInt(tool.getProviders().getMetaAccess().getArrayIndexScale(kind2)));
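        // Use 32-byte (YMM) vectors when AVX2 is available and not ruled out by maxVectorSize;
        // otherwise fall back to 16-byte (XMM) vectors.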
        this.vectorSize = ((AMD64) tool.target().arch).getFeatures().contains(CPUFeature.AVX2) && (maxVectorSize < 0 || maxVectorSize >= 32) ? AVXKind.AVXSize.YMM : AVXKind.AVXSize.XMM;

        this.resultValue = result;
        this.array1Value = array1;
        this.array2Value = array2;
        this.lengthValue = length;

        // Allocate some temporaries.
        if (supportsSSE41(tool.target()) && canGenerateConstantLengthCompare(tool.target()) && !constantLengthCompareNeedsTmpArrayPointers()) {
            this.temp1 = Value.ILLEGAL;
            this.temp2 = Value.ILLEGAL;
        } else {
            this.temp1 = tool.newVariable(LIRKind.unknownReference(tool.target().arch.getWordKind()));
            this.temp2 = tool.newVariable(LIRKind.unknownReference(tool.target().arch.getWordKind()));
        }
        this.temp3 = tool.newVariable(LIRKind.value(tool.target().arch.getWordKind()));
        if (supportsSSE41(tool.target()) && canGenerateConstantLengthCompare(tool.target())) {
            this.temp4 = Value.ILLEGAL;
            this.temp5 = Value.ILLEGAL;
        } else {
            this.temp4 = tool.newVariable(LIRKind.value(tool.target().arch.getWordKind()));
            this.temp5 = kind1.isNumericFloat() || kind1 != kind2 ? tool.newVariable(LIRKind.value(tool.target().arch.getWordKind())) : Value.ILLEGAL;
        }

        if (kind1 == JavaKind.Float) {
            this.tempXMM = tool.newVariable(LIRKind.value(AMD64Kind.SINGLE));
        } else if (kind1 == JavaKind.Double) {
            this.tempXMM = tool.newVariable(LIRKind.value(AMD64Kind.DOUBLE));
        } else {
            this.tempXMM = Value.ILLEGAL;
        }

        // We only need the vector temporaries if we generate SSE code.
        if (supportsSSE41(tool.target())) {
            if (canGenerateConstantLengthCompare(tool.target())) {
                LIRKind lirKind = LIRKind.value(vectorSize == AVXKind.AVXSize.YMM ? AMD64Kind.V256_BYTE : AMD64Kind.V128_BYTE);
                this.vectorTemp1 = tool.newVariable(lirKind);
                this.vectorTemp2 = tool.newVariable(lirKind);
                this.vectorTemp3 = tool.newVariable(lirKind);
                this.vectorTemp4 = tool.newVariable(lirKind);
            } else {
                this.vectorTemp1 = tool.newVariable(LIRKind.value(AMD64Kind.DOUBLE));
                this.vectorTemp2 = tool.newVariable(LIRKind.value(AMD64Kind.DOUBLE));
                this.vectorTemp3 = Value.ILLEGAL;
                this.vectorTemp4 = Value.ILLEGAL;
            }
        } else {
            this.vectorTemp1 = Value.ILLEGAL;
            this.vectorTemp2 = Value.ILLEGAL;
            this.vectorTemp3 = Value.ILLEGAL;
            this.vectorTemp4 = Value.ILLEGAL;
        }
    }

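    /**
     * The specialized constant-length compare is only applicable if the length is a compile-time
     * constant, the elements are integers, SSE4.1 is available, and, when the element kinds
     * differ, the constant length covers at least one XMM vector's worth of elements.
     */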
    private boolean canGenerateConstantLengthCompare(TargetDescription target) {
        return LIRValueUtil.isJavaConstant(lengthValue) && kind1.isNumericInteger() && (kind1 == kind2 || getElementsPerVector(AVXKind.AVXSize.XMM) <= constantLength()) && supportsSSE41(target);
    }

    private int constantLength() {
        return LIRValueUtil.asJavaConstant(lengthValue).asInt();
    }

    @Override
    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
        Register result = asRegister(resultValue);

        Label trueLabel = new Label();
        Label falseLabel = new Label();
        Label done = new Label();

        if (canGenerateConstantLengthCompare(crb.target)) {
            emitConstantLengthArrayCompareBytes(crb, masm, new Register[]{asRegister(vectorTemp1), asRegister(vectorTemp2), asRegister(vectorTemp3), asRegister(vectorTemp4)}, falseLabel);
        } else {
            Register array1 = asRegister(temp1);
            Register array2 = asRegister(temp2);
            // Load array base addresses.
            masm.leaq(array1, new AMD64Address(asRegister(array1Value), arrayBaseOffset1));
            masm.leaq(array2, new AMD64Address(asRegister(array2Value), arrayBaseOffset2));
            Register length = asRegister(temp3);
            // Get array length.
            if (LIRValueUtil.isJavaConstant(lengthValue)) {
                masm.movl(length, constantLength());
            } else {
                masm.movl(length, asRegister(lengthValue));
            }
            // Copy the length into the result register; it serves as the tail count below.
            masm.movl(result, length);
            emitArrayCompare(crb, masm, result, array1, array2, length, trueLabel, falseLabel);
        }

        // Return true
        masm.bind(trueLabel);
        masm.movl(result, 1);
        masm.jmpb(done);

        // Return false
        masm.bind(falseLabel);
        masm.xorl(result, result);

        // That's it
        masm.bind(done);
    }

    private void emitArrayCompare(CompilationResultBuilder crb, AMD64MacroAssembler masm,
                    Register result, Register array1, Register array2, Register length,
                    Label trueLabel, Label falseLabel) {
        if (supportsSSE41(crb.target)) {
            emitVectorCompare(crb, masm, result, array1, array2, length, trueLabel, falseLabel);
        }
        if (kind1 == kind2) {
            emit8ByteCompare(crb, masm, result, array1, array2, length, trueLabel, falseLabel);
            emitTailCompares(masm, result, array1, array2, length, trueLabel, falseLabel);
        } else {
            emitDifferentKindsElementWiseCompare(crb, masm, result, array1, array2, length, trueLabel, falseLabel);
        }
    }

    /**
     * Returns whether the underlying AMD64 architecture supports SSE 4.1 instructions.
     *
     * @param target target description of the underlying architecture
     * @return true if the underlying architecture supports SSE 4.1
     */
    private static boolean supportsSSE41(TargetDescription target) {
        AMD64 arch = (AMD64) target.arch;
        return arch.getFeatures().contains(CPUFeature.SSE4_1);
    }

    /**
     * Emits code that uses SSE4.1/AVX1 128-bit (16-byte) or AVX2 256-bit (32-byte) vector compares.
     */
    private void emitVectorCompare(CompilationResultBuilder crb, AMD64MacroAssembler masm,
                    Register result, Register array1, Register array2, Register length,
                    Label trueLabel, Label falseLabel) {
        assert supportsSSE41(crb.target);

        Register vector1 = asRegister(vectorTemp1);
        Register vector2 = asRegister(vectorTemp2);

        int elementsPerVector = getElementsPerVector(vectorSize);

        Label loop = new Label();
        Label compareTail = new Label();

        boolean requiresNaNCheck = kind1.isNumericFloat();
        Label loopCheck = new Label();
        Label nanCheck = new Label();

        // Compare vector-sized chunks (16 or 32 bytes, depending on vectorSize)
        masm.andl(result, elementsPerVector - 1); // tail count
        masm.andl(length, ~(elementsPerVector - 1)); // vector count
        masm.jcc(ConditionFlag.Zero, compareTail);

        masm.leaq(array1, new AMD64Address(array1, length, arrayIndexScale1, 0));
        masm.leaq(array2, new AMD64Address(array2, length, arrayIndexScale2, 0));
        masm.negq(length);

        // Align the main loop
        masm.align(crb.target.wordSize * 2);
        masm.bind(loop);
        emitVectorLoad1(masm, vector1, array1, length, 0, vectorSize);
        emitVectorLoad2(masm, vector2, array2, length, 0, vectorSize);
        emitVectorCmp(masm, vector1, vector2, vectorSize);
        masm.jcc(ConditionFlag.NotZero, requiresNaNCheck ? nanCheck : falseLabel);

        masm.bind(loopCheck);
        masm.addq(length, elementsPerVector);
        masm.jcc(ConditionFlag.NotZero, loop);

        masm.testl(result, result);
        masm.jcc(ConditionFlag.Zero, trueLabel);

        if (requiresNaNCheck) {
            Label unalignedCheck = new Label();
            masm.jmpb(unalignedCheck);
            masm.bind(nanCheck);
            emitFloatCompareWithinRange(crb, masm, array1, array2, length, 0, falseLabel, elementsPerVector);
            masm.jmpb(loopCheck);
            masm.bind(unalignedCheck);
        }

        /*
         * Compare the remaining bytes with an unaligned memory load aligned to the end of the
         * array.
         */
        emitVectorLoad1(masm, vector1, array1, result, scaleDisplacement1(-vectorSize.getBytes()), vectorSize);
        emitVectorLoad2(masm, vector2, array2, result, scaleDisplacement2(-vectorSize.getBytes()), vectorSize);
        emitVectorCmp(masm, vector1, vector2, vectorSize);
        if (requiresNaNCheck) {
            masm.jcc(ConditionFlag.Zero, trueLabel);
            emitFloatCompareWithinRange(crb, masm, array1, array2, result, -vectorSize.getBytes(), falseLabel, elementsPerVector);
        } else {
            masm.jcc(ConditionFlag.NotZero, falseLabel);
        }
        masm.jmp(trueLabel);

        masm.bind(compareTail);
        masm.movl(length, result);
    }

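    /**
     * Number of array elements that fit into one vector of the given size, based on the larger of
     * the two element sizes.
     */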
    private int getElementsPerVector(AVXKind.AVXSize vSize) {
        return vSize.getBytes() >> Math.max(arrayIndexScale1.log2, arrayIndexScale2.log2);
    }

    private void emitVectorLoad1(AMD64MacroAssembler asm, Register dst, Register src, int displacement, AVXKind.AVXSize size) {
        emitVectorLoad1(asm, dst, src, Register.None, displacement, size);
    }

    private void emitVectorLoad2(AMD64MacroAssembler asm, Register dst, Register src, int displacement, AVXKind.AVXSize size) {
        emitVectorLoad2(asm, dst, src, Register.None, displacement, size);
    }

    private void emitVectorLoad1(AMD64MacroAssembler asm, Register dst, Register src, Register index, int displacement, AVXKind.AVXSize size) {
        emitVectorLoad(asm, dst, src, index, displacement, arrayIndexScale1, arrayIndexScale2, size);
    }

    private void emitVectorLoad2(AMD64MacroAssembler asm, Register dst, Register src, Register index, int displacement, AVXKind.AVXSize size) {
        emitVectorLoad(asm, dst, src, index, displacement, arrayIndexScale2, arrayIndexScale1, size);
    }

    private void emitVectorLoad(AMD64MacroAssembler asm, Register dst, Register src, Register index, int displacement, Scale ownScale, Scale otherScale, AVXKind.AVXSize size) {
        AMD64Address address = new AMD64Address(src, index, ownScale, displacement);
        if (ownScale.value < otherScale.value) {
            if (size == AVXKind.AVXSize.YMM) {
                getAVX2LoadAndExtendOp(ownScale, otherScale, signExtend).emit(asm, size, dst, address);
            } else {
                loadAndExtendSSE(asm, dst, address, ownScale, otherScale, signExtend);
            }
        } else {
            if (size == AVXKind.AVXSize.YMM) {
                asm.vmovdqu(dst, address);
            } else {
                asm.movdqu(dst, address);
            }
        }
    }

    private int scaleDisplacement1(int displacement) {
        return scaleDisplacement(displacement, arrayIndexScale1, arrayIndexScale2);
    }

    private int scaleDisplacement2(int displacement) {
        return scaleDisplacement(displacement, arrayIndexScale2, arrayIndexScale1);
    }

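    /**
     * Scales a byte displacement down for the array with the smaller element size: its
     * load-and-extend reads proportionally fewer bytes per vector, since the elements are widened
     * to the other array's element size on load.
     */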
    private static int scaleDisplacement(int displacement, Scale ownScale, Scale otherScale) {
        if (ownScale.value < otherScale.value) {
            return displacement >> (otherScale.log2 - ownScale.log2);
        }
        return displacement;
    }

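    /**
     * Selects the AVX2 VPMOVSX/VPMOVZX variant that widens elements of {@code ownScale} to the
     * element size of {@code otherScale}.
     */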
    private static AMD64Assembler.VexRMOp getAVX2LoadAndExtendOp(Scale ownScale, Scale otherScale, boolean signExtend) {
        switch (ownScale) {
            case Times1:
                switch (otherScale) {
                    case Times2:
                        return signExtend ? AMD64Assembler.VexRMOp.VPMOVSXBW : AMD64Assembler.VexRMOp.VPMOVZXBW;
                    case Times4:
                        return signExtend ? AMD64Assembler.VexRMOp.VPMOVSXBD : AMD64Assembler.VexRMOp.VPMOVZXBD;
                    case Times8:
                        return signExtend ? AMD64Assembler.VexRMOp.VPMOVSXBQ : AMD64Assembler.VexRMOp.VPMOVZXBQ;
                }
                throw GraalError.shouldNotReachHere();
            case Times2:
                switch (otherScale) {
                    case Times4:
                        return signExtend ? AMD64Assembler.VexRMOp.VPMOVSXWD : AMD64Assembler.VexRMOp.VPMOVZXWD;
                    case Times8:
                        return signExtend ? AMD64Assembler.VexRMOp.VPMOVSXWQ : AMD64Assembler.VexRMOp.VPMOVZXWQ;
                }
                throw GraalError.shouldNotReachHere();
            case Times4:
                return signExtend ? AMD64Assembler.VexRMOp.VPMOVSXDQ : AMD64Assembler.VexRMOp.VPMOVZXDQ;
        }
        throw GraalError.shouldNotReachHere();
    }

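    /**
     * SSE4.1 counterpart of {@link #getAVX2LoadAndExtendOp}: loads and sign- or zero-extends the
     * smaller elements using PMOVSX/PMOVZX.
     */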
    private static void loadAndExtendSSE(AMD64MacroAssembler asm, Register dst, AMD64Address src, Scale ownScale, Scale otherScale, boolean signExtend) {
        switch (ownScale) {
            case Times1:
                switch (otherScale) {
                    case Times2:
                        if (signExtend) {
                            asm.pmovsxbw(dst, src);
                        } else {
                            asm.pmovzxbw(dst, src);
                        }
                        return;
                    case Times4:
                        if (signExtend) {
                            asm.pmovsxbd(dst, src);
                        } else {
                            asm.pmovzxbd(dst, src);
                        }
                        return;
                    case Times8:
                        if (signExtend) {
                            asm.pmovsxbq(dst, src);
                        } else {
                            asm.pmovzxbq(dst, src);
                        }
                        return;
                }
                throw GraalError.shouldNotReachHere();
            case Times2:
                switch (otherScale) {
                    case Times4:
                        if (signExtend) {
                            asm.pmovsxwd(dst, src);
                        } else {
                            asm.pmovzxwd(dst, src);
                        }
                        return;
                    case Times8:
                        if (signExtend) {
                            asm.pmovsxwq(dst, src);
                        } else {
                            asm.pmovzxwq(dst, src);
                        }
                        return;
                }
                throw GraalError.shouldNotReachHere();
            case Times4:
                if (signExtend) {
                    asm.pmovsxdq(dst, src);
                } else {
                    asm.pmovzxdq(dst, src);
                }
                return;
        }
        throw GraalError.shouldNotReachHere();
    }

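    /**
     * XORs the two vectors and tests the result: the zero flag is set iff the vectors are equal.
     */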
    private static void emitVectorCmp(AMD64MacroAssembler masm, Register vector1, Register vector2, AVXKind.AVXSize size) {
        emitVectorXor(masm, vector1, vector2, size);
        emitVectorTest(masm, vector1, size);
    }

    private static void emitVectorXor(AMD64MacroAssembler masm, Register vector1, Register vector2, AVXKind.AVXSize size) {
        if (size == AVXKind.AVXSize.YMM) {
            masm.vpxor(vector1, vector1, vector2);
        } else {
            masm.pxor(vector1, vector2);
        }
    }

    private static void emitVectorTest(AMD64MacroAssembler masm, Register vector1, AVXKind.AVXSize size) {
        if (size == AVXKind.AVXSize.YMM) {
            masm.vptest(vector1, vector1);
        } else {
            masm.ptest(vector1, vector1);
        }
    }

    /**
     * Vector size used in {@link #emit8ByteCompare}.
     */
    private static final int VECTOR_SIZE = 8;

    /**
     * Emits code that uses 8-byte vector compares.
     */
    private void emit8ByteCompare(CompilationResultBuilder crb, AMD64MacroAssembler masm,
                    Register result, Register array1, Register array2, Register length, Label trueLabel, Label falseLabel) {
        assert kind1 == kind2;
        Label loop = new Label();
        Label compareTail = new Label();

        int elementsPerVector = 8 >> arrayIndexScale1.log2;

        boolean requiresNaNCheck = kind1.isNumericFloat();
        Label loopCheck = new Label();
        Label nanCheck = new Label();

        Register temp = asRegister(temp4);

        masm.andl(result, elementsPerVector - 1); // tail count
        masm.andl(length, ~(elementsPerVector - 1));  // vector count
        masm.jcc(ConditionFlag.Zero, compareTail);

        masm.leaq(array1, new AMD64Address(array1, length, arrayIndexScale1, 0));
        masm.leaq(array2, new AMD64Address(array2, length, arrayIndexScale2, 0));
        masm.negq(length);

        // Align the main loop
        masm.align(crb.target.wordSize * 2);
        masm.bind(loop);
        masm.movq(temp, new AMD64Address(array1, length, arrayIndexScale1, 0));
        masm.cmpq(temp, new AMD64Address(array2, length, arrayIndexScale2, 0));
        masm.jcc(ConditionFlag.NotEqual, requiresNaNCheck ? nanCheck : falseLabel);

        masm.bind(loopCheck);
        masm.addq(length, elementsPerVector);
        masm.jccb(ConditionFlag.NotZero, loop);

        masm.testl(result, result);
        masm.jcc(ConditionFlag.Zero, trueLabel);

        if (requiresNaNCheck) {
            // NaN check is slow path and hence placed outside of the main loop.
            Label unalignedCheck = new Label();
            masm.jmpb(unalignedCheck);
            masm.bind(nanCheck);
            // At most two iterations, unroll in the emitted code.
            for (int offset = 0; offset < VECTOR_SIZE; offset += kind1.getByteCount()) {
                emitFloatCompare(masm, array1, array2, length, offset, falseLabel, kind1.getByteCount() == VECTOR_SIZE);
            }
            masm.jmpb(loopCheck);
            masm.bind(unalignedCheck);
        }

        /*
         * Compare the remaining bytes with an unaligned memory load aligned to the end of the
         * array.
         */
        masm.movq(temp, new AMD64Address(array1, result, arrayIndexScale1, -VECTOR_SIZE));
        masm.cmpq(temp, new AMD64Address(array2, result, arrayIndexScale2, -VECTOR_SIZE));
        if (requiresNaNCheck) {
            masm.jcc(ConditionFlag.Equal, trueLabel);
            // At most two iterations, unroll in the emitted code.
            for (int offset = 0; offset < VECTOR_SIZE; offset += kind1.getByteCount()) {
                emitFloatCompare(masm, array1, array2, result, -VECTOR_SIZE + offset, falseLabel, kind1.getByteCount() == VECTOR_SIZE);
            }
        } else {
            masm.jccb(ConditionFlag.NotEqual, falseLabel);
        }
        masm.jmpb(trueLabel);

        masm.bind(compareTail);
        masm.movl(length, result);
    }

    /**
     * Emits code to compare the remaining elements (less than 8 bytes in total) with 4-, 2- and
     * 1-byte compares.
     */
    private void emitTailCompares(AMD64MacroAssembler masm,
                    Register result, Register array1, Register array2, Register length, Label trueLabel, Label falseLabel) {
        assert kind1 == kind2;
        Label compare2Bytes = new Label();
        Label compare1Byte = new Label();

        Register temp = asRegister(temp4);

        if (kind1.getByteCount() <= 4) {
            // Compare trailing 4 bytes, if any.
            masm.testl(result, arrayIndexScale1.log2 == 0 ? 4 : 4 >> arrayIndexScale1.log2);
            masm.jccb(ConditionFlag.Zero, compare2Bytes);
            masm.movl(temp, new AMD64Address(array1, 0));
            masm.cmpl(temp, new AMD64Address(array2, 0));
            if (kind1 == JavaKind.Float) {
                masm.jccb(ConditionFlag.Equal, trueLabel);
                emitFloatCompare(masm, array1, array2, Register.None, 0, falseLabel, true);
                masm.jmpb(trueLabel);
            } else {
                masm.jccb(ConditionFlag.NotEqual, falseLabel);
            }
            if (kind1.getByteCount() <= 2) {
                // Move array pointers forward.
                masm.leaq(array1, new AMD64Address(array1, 4));
                masm.leaq(array2, new AMD64Address(array2, 4));

                // Compare trailing 2 bytes, if any.
                masm.bind(compare2Bytes);
                masm.testl(result, arrayIndexScale1.log2 == 0 ? 2 : 2 >> arrayIndexScale1.log2);
                masm.jccb(ConditionFlag.Zero, compare1Byte);
                masm.movzwl(temp, new AMD64Address(array1, 0));
                masm.movzwl(length, new AMD64Address(array2, 0));
                masm.cmpl(temp, length);
                masm.jccb(ConditionFlag.NotEqual, falseLabel);

                // The one-byte tail compare is only required for boolean and byte arrays.
                if (kind1.getByteCount() <= 1) {
                    // Move array pointers forward before we compare the last trailing byte.
                    masm.leaq(array1, new AMD64Address(array1, 2));
                    masm.leaq(array2, new AMD64Address(array2, 2));

                    // Compare trailing byte, if any.
                    masm.bind(compare1Byte);
                    masm.testl(result, 1);
                    masm.jccb(ConditionFlag.Zero, trueLabel);
                    masm.movzbl(temp, new AMD64Address(array1, 0));
                    masm.movzbl(length, new AMD64Address(array2, 0));
                    masm.cmpl(temp, length);
                    masm.jccb(ConditionFlag.NotEqual, falseLabel);
                } else {
                    masm.bind(compare1Byte);
                }
            } else {
                masm.bind(compare2Bytes);
            }
        }
    }

    private void emitDifferentKindsElementWiseCompare(CompilationResultBuilder crb, AMD64MacroAssembler masm,
                    Register result, Register array1, Register array2, Register length, Label trueLabel, Label falseLabel) {
        assert kind1 != kind2;
        assert kind1.isNumericInteger() && kind2.isNumericInteger();
        Label loop = new Label();
        Label compareTail = new Label();

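        // Unroll four element compares per loop iteration.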
        int elementsPerLoopIteration = 4;

        Register tmp1 = asRegister(temp4);
        Register tmp2 = asRegister(temp5);

        masm.andl(result, elementsPerLoopIteration - 1); // tail count
        masm.andl(length, ~(elementsPerLoopIteration - 1));  // bulk loop count
        masm.jcc(ConditionFlag.Zero, compareTail);

        masm.leaq(array1, new AMD64Address(array1, length, arrayIndexScale1, 0));
        masm.leaq(array2, new AMD64Address(array2, length, arrayIndexScale2, 0));
        masm.negq(length);

        // clear comparison registers because of the missing movzlq instruction
        masm.xorq(tmp1, tmp1);
        masm.xorq(tmp2, tmp2);

        // Align the main loop
        masm.align(crb.target.wordSize * 2);
        masm.bind(loop);
        for (int i = 0; i < elementsPerLoopIteration; i++) {
            emitMovBytes(masm, tmp1, new AMD64Address(array1, length, arrayIndexScale1, i << arrayIndexScale1.log2), kind1.getByteCount());
            emitMovBytes(masm, tmp2, new AMD64Address(array2, length, arrayIndexScale2, i << arrayIndexScale2.log2), kind2.getByteCount());
            masm.cmpq(tmp1, tmp2);
            masm.jcc(ConditionFlag.NotEqual, falseLabel);
        }
        masm.addq(length, elementsPerLoopIteration);
        masm.jccb(ConditionFlag.NotZero, loop);

        masm.bind(compareTail);
        masm.testl(result, result);
        masm.jcc(ConditionFlag.Zero, trueLabel);
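        // Tail: compare the remaining 1 to 3 elements one at a time.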
        for (int i = 0; i < elementsPerLoopIteration - 1; i++) {
            emitMovBytes(masm, tmp1, new AMD64Address(array1, length, arrayIndexScale1, 0), kind1.getByteCount());
            emitMovBytes(masm, tmp2, new AMD64Address(array2, length, arrayIndexScale2, 0), kind2.getByteCount());
            masm.cmpq(tmp1, tmp2);
            masm.jcc(ConditionFlag.NotEqual, falseLabel);
            if (i < elementsPerLoopIteration - 2) {
                masm.incrementq(length, 1);
                masm.decrementq(result, 1);
                masm.jcc(ConditionFlag.Zero, trueLabel);
            } else {
                masm.jmpb(trueLabel);
            }
        }
    }

    /**
     * Emits code to fall through if {@code src} is NaN, otherwise jump to {@code branchIfNonNaN}.
     */
    private void emitNaNCheck(AMD64MacroAssembler masm, AMD64Address src, Label branchIfNonNaN) {
        assert kind1.isNumericFloat();
        Register tempXMMReg = asRegister(tempXMM);
        if (kind1 == JavaKind.Float) {
            masm.movflt(tempXMMReg, src);
        } else {
            masm.movdbl(tempXMMReg, src);
        }
        SSEOp.UCOMIS.emit(masm, kind1 == JavaKind.Float ? OperandSize.PS : OperandSize.PD, tempXMMReg, tempXMMReg);
        masm.jcc(ConditionFlag.NoParity, branchIfNonNaN);
    }

    /**
     * Emits code to compare if two floats are bitwise equal or both NaN.
     */
    private void emitFloatCompare(AMD64MacroAssembler masm, Register base1, Register base2, Register index, int offset, Label falseLabel,
                    boolean skipBitwiseCompare) {
        AMD64Address address1 = new AMD64Address(base1, index, arrayIndexScale1, offset);
        AMD64Address address2 = new AMD64Address(base2, index, arrayIndexScale2, offset);

        Label bitwiseEqual = new Label();

        if (!skipBitwiseCompare) {
            // Bitwise compare
            Register temp = asRegister(temp4);

            if (kind1 == JavaKind.Float) {
                masm.movl(temp, address1);
                masm.cmpl(temp, address2);
            } else {
                masm.movq(temp, address1);
                masm.cmpq(temp, address2);
            }
            masm.jccb(ConditionFlag.Equal, bitwiseEqual);
        }

        emitNaNCheck(masm, address1, falseLabel);
        emitNaNCheck(masm, address2, falseLabel);

        masm.bind(bitwiseEqual);
    }

    /**
     * Emits code to compare float equality within a range.
     */
    private void emitFloatCompareWithinRange(CompilationResultBuilder crb, AMD64MacroAssembler masm,
                    Register base1, Register base2, Register index, int offset, Label falseLabel, int range) {
        assert kind1.isNumericFloat();
        Label loop = new Label();
        Register i = asRegister(temp5);

        masm.movq(i, range);
        masm.negq(i);
        // Align the main loop
        masm.align(crb.target.wordSize * 2);
        masm.bind(loop);
        emitFloatCompare(masm, base1, base2, index, offset, falseLabel, range == 1);
        masm.incrementq(index, 1);
        masm.incrementq(i, 1);
        masm.jccb(ConditionFlag.NotZero, loop);
        // All floats within the range are equal; undo the changes to the index register
        masm.subq(index, range);
    }

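    /**
     * Returns whether the constant-length compare will emit a vectorized main loop, which needs
     * the temporary array-pointer registers {@code temp1} and {@code temp2}.
     */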
    private boolean constantLengthCompareNeedsTmpArrayPointers() {
        AVXKind.AVXSize vSize = vectorSize;
        if (constantLength() < getElementsPerVector(vectorSize)) {
            vSize = AVXKind.AVXSize.XMM;
        }
        int vectorCount = constantLength() & ~(2 * getElementsPerVector(vSize) - 1);
        return vectorCount > 0;
    }

    /**
     * Emits specialized assembly for checking equality of memory regions
     * {@code arrayPtr1[0..nBytes]} and {@code arrayPtr2[0..nBytes]}. If they match, execution
     * continues directly after the emitted code block, otherwise we jump to {@code noMatch}.
     */
    private void emitConstantLengthArrayCompareBytes(
                    CompilationResultBuilder crb,
                    AMD64MacroAssembler asm,
                    Register[] tmpVectors,
                    Label noMatch) {
        if (constantLength() == 0) {
            // do nothing
            return;
        }
        Register arrayPtr1 = asRegister(array1Value);
        Register arrayPtr2 = asRegister(array2Value);
        Register tmp = asRegister(temp3);
        AVXKind.AVXSize vSize = vectorSize;
        if (constantLength() < getElementsPerVector(vectorSize)) {
            vSize = AVXKind.AVXSize.XMM;
        }
        int elementsPerVector = getElementsPerVector(vSize);
        if (elementsPerVector > constantLength()) {
            assert kind1 == kind2;
            int byteLength = constantLength() << arrayIndexScale1.log2;
            // array is shorter than any vector register, use regular XOR instructions
            int movSize = (byteLength < 2) ? 1 : ((byteLength < 4) ? 2 : ((byteLength < 8) ? 4 : 8));
            emitMovBytes(asm, tmp, new AMD64Address(arrayPtr1, arrayBaseOffset1), movSize);
            emitXorBytes(asm, tmp, new AMD64Address(arrayPtr2, arrayBaseOffset2), movSize);
            asm.jccb(AMD64Assembler.ConditionFlag.NotZero, noMatch);
            if (byteLength > movSize) {
                emitMovBytes(asm, tmp, new AMD64Address(arrayPtr1, arrayBaseOffset1 + byteLength - movSize), movSize);
                emitXorBytes(asm, tmp, new AMD64Address(arrayPtr2, arrayBaseOffset2 + byteLength - movSize), movSize);
                asm.jccb(AMD64Assembler.ConditionFlag.NotZero, noMatch);
            }
        } else {
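            // The main loop compares two vectors per iteration, counting a negative index up to
            // zero; any remaining elements are handled with (possibly overlapping) vector loads.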
            int elementsPerVectorLoop = 2 * elementsPerVector;
            int tailCount = constantLength() & (elementsPerVectorLoop - 1);
            int vectorCount = constantLength() & ~(elementsPerVectorLoop - 1);
            int bytesPerVector = vSize.getBytes();
            if (vectorCount > 0) {
                Label loopBegin = new Label();
                Register tmpArrayPtr1 = asRegister(temp1);
                Register tmpArrayPtr2 = asRegister(temp2);
                asm.leaq(tmpArrayPtr1, new AMD64Address(arrayPtr1, vectorCount << arrayIndexScale1.log2));
                asm.leaq(tmpArrayPtr2, new AMD64Address(arrayPtr2, vectorCount << arrayIndexScale2.log2));
                arrayPtr1 = tmpArrayPtr1;
                arrayPtr2 = tmpArrayPtr2;
                asm.movq(tmp, -vectorCount);
                asm.align(crb.target.wordSize * 2);
                asm.bind(loopBegin);
                emitVectorLoad1(asm, tmpVectors[0], arrayPtr1, tmp, arrayBaseOffset1, vSize);
                emitVectorLoad2(asm, tmpVectors[1], arrayPtr2, tmp, arrayBaseOffset2, vSize);
                emitVectorLoad1(asm, tmpVectors[2], arrayPtr1, tmp, arrayBaseOffset1 + scaleDisplacement1(bytesPerVector), vSize);
                emitVectorLoad2(asm, tmpVectors[3], arrayPtr2, tmp, arrayBaseOffset2 + scaleDisplacement2(bytesPerVector), vSize);
                emitVectorXor(asm, tmpVectors[0], tmpVectors[1], vSize);
                emitVectorXor(asm, tmpVectors[2], tmpVectors[3], vSize);
                emitVectorTest(asm, tmpVectors[0], vSize);
                asm.jccb(AMD64Assembler.ConditionFlag.NotZero, noMatch);
                emitVectorTest(asm, tmpVectors[2], vSize);
                asm.jccb(AMD64Assembler.ConditionFlag.NotZero, noMatch);
                asm.addq(tmp, elementsPerVectorLoop);
                asm.jccb(AMD64Assembler.ConditionFlag.NotZero, loopBegin);
            }
            if (tailCount > 0) {
                emitVectorLoad1(asm, tmpVectors[0], arrayPtr1, arrayBaseOffset1 + (tailCount << arrayIndexScale1.log2) - scaleDisplacement1(bytesPerVector), vSize);
                emitVectorLoad2(asm, tmpVectors[1], arrayPtr2, arrayBaseOffset2 + (tailCount << arrayIndexScale2.log2) - scaleDisplacement2(bytesPerVector), vSize);
                emitVectorXor(asm, tmpVectors[0], tmpVectors[1], vSize);
                if (tailCount > elementsPerVector) {
                    emitVectorLoad1(asm, tmpVectors[2], arrayPtr1, arrayBaseOffset1, vSize);
                    emitVectorLoad2(asm, tmpVectors[3], arrayPtr2, arrayBaseOffset2, vSize);
                    emitVectorXor(asm, tmpVectors[2], tmpVectors[3], vSize);
                    emitVectorTest(asm, tmpVectors[2], vSize);
                    asm.jccb(AMD64Assembler.ConditionFlag.NotZero, noMatch);
                }
                emitVectorTest(asm, tmpVectors[0], vSize);
                asm.jccb(AMD64Assembler.ConditionFlag.NotZero, noMatch);
            }
        }
    }

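    /**
     * Loads {@code size} bytes from {@code src} into {@code dst}, sign- or zero-extending to 64
     * bits according to {@link #signExtend}.
     */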
    private void emitMovBytes(AMD64MacroAssembler asm, Register dst, AMD64Address src, int size) {
        switch (size) {
            case 1:
                if (signExtend) {
                    asm.movsbq(dst, src);
                } else {
                    asm.movzbq(dst, src);
                }
                break;
            case 2:
                if (signExtend) {
                    asm.movswq(dst, src);
                } else {
                    asm.movzwq(dst, src);
                }
                break;
            case 4:
                if (signExtend) {
                    asm.movslq(dst, src);
                } else {
                    // there is no movzlq
                    asm.movl(dst, src);
                }
                break;
            case 8:
                asm.movq(dst, src);
                break;
            default:
                throw new IllegalStateException();
        }
    }

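    /**
     * XORs {@code size} bytes at {@code src} into {@code dst}; the zero flag is set iff the two
     * operands are equal.
     */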
    private static void emitXorBytes(AMD64MacroAssembler asm, Register dst, AMD64Address src, int size) {
        OperandSize opSize = getOperandSize(size);
        XOR.getRMOpcode(opSize).emit(asm, opSize, dst, src);
    }

    private static OperandSize getOperandSize(int size) {
        switch (size) {
            case 1:
                return OperandSize.BYTE;
            case 2:
                return OperandSize.WORD;
            case 4:
                return OperandSize.DWORD;
            case 8:
                return OperandSize.QWORD;
            default:
                throw new IllegalStateException();
        }
    }
}