/*
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package org.graalvm.compiler.lir.amd64;

import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import static jdk.vm.ci.code.ValueUtil.asRegister;

import java.lang.reflect.Array;
import java.lang.reflect.Field;

import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.lir.LIRInstructionClass;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
import org.graalvm.compiler.lir.gen.LIRGeneratorTool;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.Value;
import sun.misc.Unsafe;

/**
 * Emits code that compares two arrays of the same length. If the CPU supports any vector
 * instructions, specialized code is emitted to leverage these instructions.
 */
@Opcode("ARRAY_EQUALS")
public final class AMD64ArrayEqualsOp extends AMD64LIRInstruction {
    public static final LIRInstructionClass<AMD64ArrayEqualsOp> TYPE = LIRInstructionClass.create(AMD64ArrayEqualsOp.class);

    private final JavaKind kind;
    private final int arrayBaseOffset;
    private final int arrayIndexScale;

    @Def({REG}) protected Value resultValue;
    @Alive({REG}) protected Value array1Value;
    @Alive({REG}) protected Value array2Value;
    @Alive({REG}) protected Value lengthValue;
    @Temp({REG}) protected Value temp1;
    @Temp({REG}) protected Value temp2;
    @Temp({REG}) protected Value temp3;
    @Temp({REG}) protected Value temp4;

    @Temp({REG, ILLEGAL}) protected Value temp5;
    @Temp({REG, ILLEGAL}) protected Value tempXMM;

    @Temp({REG, ILLEGAL}) protected Value vectorTemp1;
    @Temp({REG, ILLEGAL}) protected Value vectorTemp2;

    public AMD64ArrayEqualsOp(LIRGeneratorTool tool, JavaKind kind, Value result, Value array1, Value array2, Value length) {
        super(TYPE);
        this.kind = kind;

        Class<?> arrayClass = Array.newInstance(kind.toJavaClass(), 0).getClass();
        this.arrayBaseOffset = UNSAFE.arrayBaseOffset(arrayClass);
        this.arrayIndexScale = UNSAFE.arrayIndexScale(arrayClass);

        this.resultValue = result;
        this.array1Value = array1;
        this.array2Value = array2;
        this.lengthValue = length;

        // Allocate some temporaries.
        this.temp1 = tool.newVariable(LIRKind.unknownReference(tool.target().arch.getWordKind()));
        this.temp2 = tool.newVariable(LIRKind.unknownReference(tool.target().arch.getWordKind()));
        this.temp3 = tool.newVariable(LIRKind.value(tool.target().arch.getWordKind()));
        this.temp4 = tool.newVariable(LIRKind.value(tool.target().arch.getWordKind()));

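        // temp5 and tempXMM are only needed by the NaN-check slow path for float and double
        // arrays, where bitwise inequality does not necessarily mean the values differ.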
        this.temp5 = kind.isNumericFloat() ? tool.newVariable(LIRKind.value(tool.target().arch.getWordKind())) : Value.ILLEGAL;
        if (kind == JavaKind.Float) {
            this.tempXMM = tool.newVariable(LIRKind.value(AMD64Kind.SINGLE));
        } else if (kind == JavaKind.Double) {
            this.tempXMM = tool.newVariable(LIRKind.value(AMD64Kind.DOUBLE));
        } else {
            this.tempXMM = Value.ILLEGAL;
        }

        // We only need the vector temporaries if we generate SSE or AVX code.
        if (supportsSSE41(tool.target())) {
            this.vectorTemp1 = tool.newVariable(LIRKind.value(AMD64Kind.DOUBLE));
            this.vectorTemp2 = tool.newVariable(LIRKind.value(AMD64Kind.DOUBLE));
        } else {
            this.vectorTemp1 = Value.ILLEGAL;
            this.vectorTemp2 = Value.ILLEGAL;
        }
    }

    @Override
    public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
        Register result = asRegister(resultValue);
        Register array1 = asRegister(temp1);
        Register array2 = asRegister(temp2);
        Register length = asRegister(temp3);

        Label trueLabel = new Label();
        Label falseLabel = new Label();
        Label done = new Label();

        // Load array base addresses.
        masm.leaq(array1, new AMD64Address(asRegister(array1Value), arrayBaseOffset));
        masm.leaq(array2, new AMD64Address(asRegister(array2Value), arrayBaseOffset));

        // Get array length in bytes.
        masm.movl(length, asRegister(lengthValue));

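        // The index scale is the element size and always a power of two (1, 2, 4 or 8),
        // so shifting left by its log2 converts the element count into a byte count.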
        if (arrayIndexScale > 1) {
            masm.shll(length, NumUtil.log2Ceil(arrayIndexScale)); // scale length
        }

        masm.movl(result, length); // copy

        if (supportsAVX2(crb.target)) {
            emitAVXCompare(crb, masm, result, array1, array2, length, trueLabel, falseLabel);
        } else if (supportsSSE41(crb.target)) {
            // This code is used for AVX as well because our backend correctly ensures that
            // VEX-prefixed instructions are emitted if AVX is supported.
            emitSSE41Compare(crb, masm, result, array1, array2, length, trueLabel, falseLabel);
        }

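        // The vector loops either branch to trueLabel/falseLabel directly or fall through
        // with the remaining tail length (in bytes) left in the length register.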
        emit8ByteCompare(crb, masm, result, array1, array2, length, trueLabel, falseLabel);
        emitTailCompares(masm, result, array1, array2, length, trueLabel, falseLabel);

        // Return true
        masm.bind(trueLabel);
        masm.movl(result, 1);
        masm.jmpb(done);

        // Return false
        masm.bind(falseLabel);
        masm.xorl(result, result);

        // That's it
        masm.bind(done);
    }

    /**
     * Returns whether the underlying AMD64 architecture supports SSE 4.1 instructions.
     *
     * @param target target description of the underlying architecture
     * @return true if the underlying architecture supports SSE 4.1
     */
    private static boolean supportsSSE41(TargetDescription target) {
        AMD64 arch = (AMD64) target.arch;
        return arch.getFeatures().contains(CPUFeature.SSE4_1);
    }

    /**
     * Vector size used in {@link #emitSSE41Compare}.
     */
    private static final int SSE4_1_VECTOR_SIZE = 16;

    /**
     * Emits code that uses SSE4.1 128-bit (16-byte) vector compares.
     */
    private void emitSSE41Compare(CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, Register array1, Register array2, Register length, Label trueLabel, Label falseLabel) {
        assert supportsSSE41(crb.target);

        Register vector1 = asRegister(vectorTemp1, AMD64Kind.DOUBLE);
        Register vector2 = asRegister(vectorTemp2, AMD64Kind.DOUBLE);

        Label loop = new Label();
        Label compareTail = new Label();

        boolean requiresNaNCheck = kind.isNumericFloat();
        Label loopCheck = new Label();
        Label nanCheck = new Label();

        // Compare 16-byte vectors
        masm.andl(result, SSE4_1_VECTOR_SIZE - 1); // tail count (in bytes)
        masm.andl(length, ~(SSE4_1_VECTOR_SIZE - 1)); // vector count (in bytes)
        masm.jcc(ConditionFlag.Zero, compareTail);

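        // Point both array registers at the end of the vectorized region and run the loop
        // with a negative byte index that counts up towards zero.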
        masm.leaq(array1, new AMD64Address(array1, length, Scale.Times1, 0));
        masm.leaq(array2, new AMD64Address(array2, length, Scale.Times1, 0));
        masm.negq(length);

        // Align the main loop
        masm.align(crb.target.wordSize * 2);
        masm.bind(loop);
        masm.movdqu(vector1, new AMD64Address(array1, length, Scale.Times1, 0));
        masm.movdqu(vector2, new AMD64Address(array2, length, Scale.Times1, 0));
        masm.pxor(vector1, vector2);
        masm.ptest(vector1, vector1);
        masm.jcc(ConditionFlag.NotZero, requiresNaNCheck ? nanCheck : falseLabel);

        masm.bind(loopCheck);
        masm.addq(length, SSE4_1_VECTOR_SIZE);
        masm.jcc(ConditionFlag.NotZero, loop);

        masm.testl(result, result);
        masm.jcc(ConditionFlag.Zero, trueLabel);

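        // The NaN re-check is a slow path and is therefore placed outside the main loop.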
        if (requiresNaNCheck) {
            Label unalignedCheck = new Label();
            masm.jmpb(unalignedCheck);
            masm.bind(nanCheck);
            emitFloatCompareWithinRange(crb, masm, array1, array2, length, 0, falseLabel, SSE4_1_VECTOR_SIZE);
            masm.jmpb(loopCheck);
            masm.bind(unalignedCheck);
        }

        /*
         * Compare the remaining bytes with an unaligned memory load aligned to the end of the
         * array. This may re-read bytes that were already compared, which is harmless for an
         * equality check.
         */
        masm.movdqu(vector1, new AMD64Address(array1, result, Scale.Times1, -SSE4_1_VECTOR_SIZE));
        masm.movdqu(vector2, new AMD64Address(array2, result, Scale.Times1, -SSE4_1_VECTOR_SIZE));
        masm.pxor(vector1, vector2);
        masm.ptest(vector1, vector1);
        if (requiresNaNCheck) {
            masm.jcc(ConditionFlag.Zero, trueLabel);
            emitFloatCompareWithinRange(crb, masm, array1, array2, result, -SSE4_1_VECTOR_SIZE, falseLabel, SSE4_1_VECTOR_SIZE);
        } else {
            masm.jcc(ConditionFlag.NotZero, falseLabel);
        }
        masm.jmp(trueLabel);

        masm.bind(compareTail);
        masm.movl(length, result);
    }

    /**
     * Returns whether the underlying AMD64 architecture supports AVX2 instructions.
     *
     * @param target target description of the underlying architecture
     * @return true if the underlying architecture supports AVX2
     */
    private static boolean supportsAVX2(TargetDescription target) {
        AMD64 arch = (AMD64) target.arch;
        return arch.getFeatures().contains(CPUFeature.AVX2);
    }

    /**
     * Vector size used in {@link #emitAVXCompare}.
     */
    private static final int AVX_VECTOR_SIZE = 32;

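    /**
     * Emits code that uses AVX2 256-bit (32-byte) vector compares.
     */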
    private void emitAVXCompare(CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, Register array1, Register array2, Register length, Label trueLabel, Label falseLabel) {
        assert supportsAVX2(crb.target);

        Register vector1 = asRegister(vectorTemp1, AMD64Kind.DOUBLE);
        Register vector2 = asRegister(vectorTemp2, AMD64Kind.DOUBLE);

        Label loop = new Label();
        Label compareTail = new Label();

        boolean requiresNaNCheck = kind.isNumericFloat();
        Label loopCheck = new Label();
        Label nanCheck = new Label();

        // Compare 32-byte vectors
        masm.andl(result, AVX_VECTOR_SIZE - 1); // tail count (in bytes)
        masm.andl(length, ~(AVX_VECTOR_SIZE - 1)); // vector count (in bytes)
        masm.jcc(ConditionFlag.Zero, compareTail);

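        // Same negative-index loop structure as in emitSSE41Compare, with 32-byte strides.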
        masm.leaq(array1, new AMD64Address(array1, length, Scale.Times1, 0));
        masm.leaq(array2, new AMD64Address(array2, length, Scale.Times1, 0));
        masm.negq(length);

        // Align the main loop
        masm.align(crb.target.wordSize * 2);
        masm.bind(loop);
        masm.vmovdqu(vector1, new AMD64Address(array1, length, Scale.Times1, 0));
        masm.vmovdqu(vector2, new AMD64Address(array2, length, Scale.Times1, 0));
        masm.vpxor(vector1, vector1, vector2);
        masm.vptest(vector1, vector1);
        masm.jcc(ConditionFlag.NotZero, requiresNaNCheck ? nanCheck : falseLabel);

        masm.bind(loopCheck);
        masm.addq(length, AVX_VECTOR_SIZE);
        masm.jcc(ConditionFlag.NotZero, loop);

        masm.testl(result, result);
        masm.jcc(ConditionFlag.Zero, trueLabel);

        if (requiresNaNCheck) {
            Label unalignedCheck = new Label();
            masm.jmpb(unalignedCheck);
            masm.bind(nanCheck);
            emitFloatCompareWithinRange(crb, masm, array1, array2, length, 0, falseLabel, AVX_VECTOR_SIZE);
            masm.jmpb(loopCheck);
            masm.bind(unalignedCheck);
        }

        /*
         * Compare the remaining bytes with an unaligned memory load aligned to the end of the
         * array.
         */
        masm.vmovdqu(vector1, new AMD64Address(array1, result, Scale.Times1, -AVX_VECTOR_SIZE));
        masm.vmovdqu(vector2, new AMD64Address(array2, result, Scale.Times1, -AVX_VECTOR_SIZE));
        masm.vpxor(vector1, vector1, vector2);
        masm.vptest(vector1, vector1);
        if (requiresNaNCheck) {
            masm.jcc(ConditionFlag.Zero, trueLabel);
            emitFloatCompareWithinRange(crb, masm, array1, array2, result, -AVX_VECTOR_SIZE, falseLabel, AVX_VECTOR_SIZE);
        } else {
            masm.jcc(ConditionFlag.NotZero, falseLabel);
        }
        masm.jmp(trueLabel);

        masm.bind(compareTail);
        masm.movl(length, result);
    }

    /**
     * Vector size used in {@link #emit8ByteCompare}.
     */
    private static final int VECTOR_SIZE = 8;

    /**
     * Emits code that uses 8-byte vector compares.
     */
    private void emit8ByteCompare(CompilationResultBuilder crb, AMD64MacroAssembler masm, Register result, Register array1, Register array2, Register length, Label trueLabel, Label falseLabel) {
        Label loop = new Label();
        Label compareTail = new Label();

        boolean requiresNaNCheck = kind.isNumericFloat();
        Label loopCheck = new Label();
        Label nanCheck = new Label();

        Register temp = asRegister(temp4);

        masm.andl(result, VECTOR_SIZE - 1); // tail count (in bytes)
        masm.andl(length, ~(VECTOR_SIZE - 1));  // vector count (in bytes)
        masm.jcc(ConditionFlag.Zero, compareTail);

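        // Same negative-index loop structure as the vector variants, comparing 8 bytes at a
        // time through a general-purpose register.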
        masm.leaq(array1, new AMD64Address(array1, length, Scale.Times1, 0));
        masm.leaq(array2, new AMD64Address(array2, length, Scale.Times1, 0));
        masm.negq(length);

        // Align the main loop
        masm.align(crb.target.wordSize * 2);
        masm.bind(loop);
        masm.movq(temp, new AMD64Address(array1, length, Scale.Times1, 0));
        masm.cmpq(temp, new AMD64Address(array2, length, Scale.Times1, 0));
        masm.jcc(ConditionFlag.NotEqual, requiresNaNCheck ? nanCheck : falseLabel);

        masm.bind(loopCheck);
        masm.addq(length, VECTOR_SIZE);
        masm.jccb(ConditionFlag.NotZero, loop);

        masm.testl(result, result);
        masm.jcc(ConditionFlag.Zero, trueLabel);

        if (requiresNaNCheck) {
            // NaN check is slow path and hence placed outside of the main loop.
            Label unalignedCheck = new Label();
            masm.jmpb(unalignedCheck);
            masm.bind(nanCheck);
            // At most two iterations, unroll in the emitted code.
            for (int offset = 0; offset < VECTOR_SIZE; offset += kind.getByteCount()) {
                emitFloatCompare(masm, array1, array2, length, offset, falseLabel, kind.getByteCount() == VECTOR_SIZE);
            }
            masm.jmpb(loopCheck);
            masm.bind(unalignedCheck);
        }

        /*
         * Compare the remaining bytes with an unaligned memory load aligned to the end of the
         * array.
         */
        masm.movq(temp, new AMD64Address(array1, result, Scale.Times1, -VECTOR_SIZE));
        masm.cmpq(temp, new AMD64Address(array2, result, Scale.Times1, -VECTOR_SIZE));
        if (requiresNaNCheck) {
            masm.jcc(ConditionFlag.Equal, trueLabel);
            // At most two iterations, unroll in the emitted code.
            for (int offset = 0; offset < VECTOR_SIZE; offset += kind.getByteCount()) {
                emitFloatCompare(masm, array1, array2, result, -VECTOR_SIZE + offset, falseLabel, kind.getByteCount() == VECTOR_SIZE);
            }
        } else {
            masm.jccb(ConditionFlag.NotEqual, falseLabel);
        }
        masm.jmpb(trueLabel);

        masm.bind(compareTail);
        masm.movl(length, result);
    }

    /**
     * Emits code to compare the remaining tail (at most 7 bytes, tested in chunks of 4, 2 and
     * 1 bytes).
     */
    private void emitTailCompares(AMD64MacroAssembler masm, Register result, Register array1, Register array2, Register length, Label trueLabel, Label falseLabel) {
        Label compare2Bytes = new Label();
        Label compare1Byte = new Label();

        Register temp = asRegister(temp4);

        if (kind.getByteCount() <= 4) {
            // Compare trailing 4 bytes, if any.
            masm.testl(result, 4);
            masm.jccb(ConditionFlag.Zero, compare2Bytes);
            masm.movl(temp, new AMD64Address(array1, 0));
            masm.cmpl(temp, new AMD64Address(array2, 0));
            if (kind == JavaKind.Float) {
                masm.jccb(ConditionFlag.Equal, trueLabel);
                emitFloatCompare(masm, array1, array2, Register.None, 0, falseLabel, true);
                masm.jmpb(trueLabel);
            } else {
                masm.jccb(ConditionFlag.NotEqual, falseLabel);
            }
            if (kind.getByteCount() <= 2) {
                // Move array pointers forward.
                masm.leaq(array1, new AMD64Address(array1, 4));
                masm.leaq(array2, new AMD64Address(array2, 4));

                // Compare trailing 2 bytes, if any.
                masm.bind(compare2Bytes);
                masm.testl(result, 2);
                masm.jccb(ConditionFlag.Zero, compare1Byte);
                masm.movzwl(temp, new AMD64Address(array1, 0));
                masm.movzwl(length, new AMD64Address(array2, 0));
                masm.cmpl(temp, length);
                masm.jccb(ConditionFlag.NotEqual, falseLabel);

                // The one-byte tail compare is only required for boolean and byte arrays.
                if (kind.getByteCount() <= 1) {
                    // Move array pointers forward before we compare the last trailing byte.
                    masm.leaq(array1, new AMD64Address(array1, 2));
                    masm.leaq(array2, new AMD64Address(array2, 2));

                    // Compare trailing byte, if any.
                    masm.bind(compare1Byte);
                    masm.testl(result, 1);
                    masm.jccb(ConditionFlag.Zero, trueLabel);
                    masm.movzbl(temp, new AMD64Address(array1, 0));
                    masm.movzbl(length, new AMD64Address(array2, 0));
                    masm.cmpl(temp, length);
                    masm.jccb(ConditionFlag.NotEqual, falseLabel);
                } else {
                    masm.bind(compare1Byte);
                }
            } else {
                masm.bind(compare2Bytes);
            }
        }
    }

    /**
     * Emits code to fall through if {@code src} is NaN, otherwise jump to
     * {@code branchIfNonNaN}.
     */
    private void emitNaNCheck(AMD64MacroAssembler masm, AMD64Address src, Label branchIfNonNaN) {
        assert kind.isNumericFloat();
        Register tempXMMReg = asRegister(tempXMM);
        if (kind == JavaKind.Float) {
            masm.movflt(tempXMMReg, src);
        } else {
            masm.movdbl(tempXMMReg, src);
        }
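        // Comparing the value against itself is unordered iff it is NaN; an unordered
        // compare sets the parity flag, so NoParity means the value is not NaN.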
        SSEOp.UCOMIS.emit(masm, kind == JavaKind.Float ? OperandSize.PS : OperandSize.PD, tempXMMReg, tempXMMReg);
        masm.jcc(ConditionFlag.NoParity, branchIfNonNaN);
    }

    /**
     * Emits code that treats two float values as equal if they are bitwise equal or both are
     * NaN.
     */
    private void emitFloatCompare(AMD64MacroAssembler masm, Register base1, Register base2, Register index, int offset, Label falseLabel, boolean skipBitwiseCompare) {
        AMD64Address address1 = new AMD64Address(base1, index, Scale.Times1, offset);
        AMD64Address address2 = new AMD64Address(base2, index, Scale.Times1, offset);

        Label bitwiseEqual = new Label();

        if (!skipBitwiseCompare) {
            // Bitwise compare
            Register temp = asRegister(temp4);

            if (kind == JavaKind.Float) {
                masm.movl(temp, address1);
                masm.cmpl(temp, address2);
            } else {
                masm.movq(temp, address1);
                masm.cmpq(temp, address2);
            }
            masm.jccb(ConditionFlag.Equal, bitwiseEqual);
        }

        emitNaNCheck(masm, address1, falseLabel);
        emitNaNCheck(masm, address2, falseLabel);

        masm.bind(bitwiseEqual);
    }

    /**
     * Emits code that applies the float comparison from {@link #emitFloatCompare} to every
     * element within a range of {@code range} bytes.
     */
    private void emitFloatCompareWithinRange(CompilationResultBuilder crb, AMD64MacroAssembler masm, Register base1, Register base2, Register index, int offset, Label falseLabel, int range) {
        assert kind.isNumericFloat();
        Label loop = new Label();
        Register i = asRegister(temp5);

        masm.movq(i, range);
        masm.negq(i);
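        // i counts from -range up to zero in element-sized steps while index advances over
        // the elements being re-checked.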
        // Align the main loop
        masm.align(crb.target.wordSize * 2);
        masm.bind(loop);
        emitFloatCompare(masm, base1, base2, index, offset, falseLabel, kind.getByteCount() == range);
        masm.addq(index, kind.getByteCount());
        masm.addq(i, kind.getByteCount());
        masm.jccb(ConditionFlag.NotZero, loop);
        // All floats within the range are equal; undo the change to the index register.
        masm.subq(index, range);
    }

    private static final Unsafe UNSAFE = initUnsafe();

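    /**
     * Obtains the {@link Unsafe} instance, falling back to reading the {@code theUnsafe}
     * field reflectively when the caller is not on the boot class path.
     */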
    private static Unsafe initUnsafe() {
        try {
            return Unsafe.getUnsafe();
        } catch (SecurityException se) {
            try {
                Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
                theUnsafe.setAccessible(true);
                return (Unsafe) theUnsafe.get(Unsafe.class);
            } catch (Exception e) {
                throw new RuntimeException("exception while trying to get Unsafe", e);
            }
        }
    }
}