
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayIndexOfOp.java

rev 56282 : [mq]: graal


 547                 emitMOVMSK(asm, getVectorSize(), cmpResult[1], vecArray[i]);
 548                 emitMOVMSK(asm, getVectorSize(), cmpResult[0], vecArray[i + 1]);
 549                 emitJnz(asm, cmpResult[1], vectorFound[nVectors - (i + 1)], shortJmp);
 550                 emitJnz(asm, cmpResult[0], vectorFound[nVectors - (i + 2)], shortJmp);
 551             }
 552         }
 553     }
 554 
 555     private static void emitJnz(AMD64MacroAssembler asm, Register cond, Label tgt, boolean shortJmp) {
 556         asm.testl(cond, cond);
 557         if (shortJmp) {
 558             asm.jccb(AMD64Assembler.ConditionFlag.NotZero, tgt);
 559         } else {
 560             asm.jcc(AMD64Assembler.ConditionFlag.NotZero, tgt);
 561         }
 562     }
 563 
 564     private void emitArrayLoad(AMD64MacroAssembler asm, AVXKind.AVXSize vectorSize, Register vecDst, Register arrayPtr, Register index, int offset, boolean alignedLoad) {
 565         AMD64Address src = new AMD64Address(arrayPtr, index, arrayIndexScale, offset);
 566         if (asm.supports(CPUFeature.AVX)) {
 567             VexMoveOp loadOp = alignedLoad ? VexMoveOp.VMOVDQA : VexMoveOp.VMOVDQU;
 568             loadOp.emit(asm, vectorSize, vecDst, src);
 569         } else {
 570             // SSE
 571             asm.movdqu(vecDst, src);
 572         }
 573     }
 574 
 575     /**
 576      * Compares all packed bytes/words/dwords in {@code vecArray} to {@code vecCmp}. Matching values
 577      * are set to all ones (0xff, 0xffff, ...), non-matching values are set to zero.
 578      */
 579     private static void emitVectorCompareInst(AMD64MacroAssembler asm, JavaKind kind, AVXKind.AVXSize vectorSize, Register vecArray, Register vecCmp) {
 580         switch (kind) {
 581             case Byte:
 582                 if (asm.supports(CPUFeature.AVX)) {
 583                     VexRVMOp.VPCMPEQB.emit(asm, vectorSize, vecArray, vecCmp, vecArray);
 584                 } else { // SSE
 585                     asm.pcmpeqb(vecArray, vecCmp);
 586                 }
 587                 break;




 547                 emitMOVMSK(asm, getVectorSize(), cmpResult[1], vecArray[i]);
 548                 emitMOVMSK(asm, getVectorSize(), cmpResult[0], vecArray[i + 1]);
 549                 emitJnz(asm, cmpResult[1], vectorFound[nVectors - (i + 1)], shortJmp);
 550                 emitJnz(asm, cmpResult[0], vectorFound[nVectors - (i + 2)], shortJmp);
 551             }
 552         }
 553     }
 554 
 555     private static void emitJnz(AMD64MacroAssembler asm, Register cond, Label tgt, boolean shortJmp) {
 556         asm.testl(cond, cond);
 557         if (shortJmp) {
 558             asm.jccb(AMD64Assembler.ConditionFlag.NotZero, tgt);
 559         } else {
 560             asm.jcc(AMD64Assembler.ConditionFlag.NotZero, tgt);
 561         }
 562     }
 563 
 564     private void emitArrayLoad(AMD64MacroAssembler asm, AVXKind.AVXSize vectorSize, Register vecDst, Register arrayPtr, Register index, int offset, boolean alignedLoad) {
 565         AMD64Address src = new AMD64Address(arrayPtr, index, arrayIndexScale, offset);
 566         if (asm.supports(CPUFeature.AVX)) {
 567             VexMoveOp loadOp = alignedLoad ? VexMoveOp.VMOVDQA32 : VexMoveOp.VMOVDQU32;
 568             loadOp.emit(asm, vectorSize, vecDst, src);
 569         } else {
 570             // SSE
 571             asm.movdqu(vecDst, src);
 572         }
 573     }
 574 
 575     /**
 576      * Compares all packed bytes/words/dwords in {@code vecArray} to {@code vecCmp}. Matching values
 577      * are set to all ones (0xff, 0xffff, ...), non-matching values are set to zero.
 578      */
 579     private static void emitVectorCompareInst(AMD64MacroAssembler asm, JavaKind kind, AVXKind.AVXSize vectorSize, Register vecArray, Register vecCmp) {
 580         switch (kind) {
 581             case Byte:
 582                 if (asm.supports(CPUFeature.AVX)) {
 583                     VexRVMOp.VPCMPEQB.emit(asm, vectorSize, vecArray, vecCmp, vecArray);
 584                 } else { // SSE
 585                     asm.pcmpeqb(vecArray, vecCmp);
 586                 }
 587                 break;
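
Illustrative sketch, not part of this webrev: the two listings above are the two sides of the review and differ only at line 567 (VMOVDQA/VMOVDQU vs. VMOVDQA32/VMOVDQU32 move-op constants). The helpers shown -- emitArrayLoad, emitVectorCompareInst, emitMOVMSK, emitJnz -- emit a load / packed compare-equal / move-mask / branch-if-nonzero sequence (MOVDQU + PCMPEQB + PMOVMSKB + JNZ, or the VEX forms). As a rough illustration of that search pattern at the Java level, here is a minimal sketch using the JDK incubator Vector API (an assumption made for illustration only; this op emits the machine instructions directly and does not use the Vector API). The method name indexOfByte is hypothetical; running it requires --add-modules jdk.incubator.vector.

import jdk.incubator.vector.ByteVector;
import jdk.incubator.vector.VectorMask;
import jdk.incubator.vector.VectorSpecies;

public final class IndexOfSketch {
    // 16-byte species, roughly one XMM-sized chunk per iteration.
    private static final VectorSpecies<Byte> SPECIES = ByteVector.SPECIES_128;

    static int indexOfByte(byte[] array, byte needle) {
        int i = 0;
        int upper = SPECIES.loopBound(array.length);
        for (; i < upper; i += SPECIES.length()) {
            // Load one chunk of the array (cf. emitArrayLoad).
            ByteVector chunk = ByteVector.fromArray(SPECIES, array, i);
            // Packed compare-equal: matching lanes become set mask bits
            // (cf. emitVectorCompareInst, which sets matching lanes to all ones).
            VectorMask<Byte> eq = chunk.eq(needle);
            // Branch on "any lane matched" (cf. emitMOVMSK + emitJnz).
            if (eq.anyTrue()) {
                // Index of the lowest matching lane within this chunk.
                return i + eq.firstTrue();
            }
        }
        // Scalar tail for the remaining bytes.
        for (; i < array.length; i++) {
            if (array[i] == needle) {
                return i;
            }
        }
        return -1;
    }
}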

