31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
32 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
33 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
34 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
35 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
36 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
37 import static org.graalvm.compiler.lir.LIRValueUtil.asConstant;
38 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
39 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
40 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
41 import static org.graalvm.compiler.lir.LIRValueUtil.isIntConstant;
42 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
43
44 import java.util.Optional;
45
46 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
47 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
48 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
49 import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
50 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
51 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
52 import org.graalvm.compiler.core.common.LIRKind;
53 import org.graalvm.compiler.core.common.NumUtil;
54 import org.graalvm.compiler.core.common.calc.Condition;
55 import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
56 import org.graalvm.compiler.core.common.spi.LIRKindTool;
57 import org.graalvm.compiler.debug.GraalError;
58 import org.graalvm.compiler.lir.ConstantValue;
59 import org.graalvm.compiler.lir.LIRFrameState;
60 import org.graalvm.compiler.lir.LIRInstruction;
61 import org.graalvm.compiler.lir.LIRValueUtil;
62 import org.graalvm.compiler.lir.LabelRef;
63 import org.graalvm.compiler.lir.StandardOp.JumpOp;
64 import org.graalvm.compiler.lir.StandardOp.SaveRegistersOp;
65 import org.graalvm.compiler.lir.SwitchStrategy;
66 import org.graalvm.compiler.lir.Variable;
67 import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
68 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
69 import org.graalvm.compiler.lir.amd64.AMD64ArrayCompareToOp;
70 import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
71 import org.graalvm.compiler.lir.amd64.AMD64ArrayIndexOfOp;
72 import org.graalvm.compiler.lir.amd64.AMD64Binary;
73 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
74 import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
75 import org.graalvm.compiler.lir.amd64.AMD64Call;
76 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow;
77 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
78 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
79 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondSetOp;
80 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
81 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
82 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondSetOp;
83 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.HashTableSwitchOp;
84 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
85 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
86 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
87 import org.graalvm.compiler.lir.amd64.AMD64LFenceOp;
88 import org.graalvm.compiler.lir.amd64.AMD64Move;
89 import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
90 import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
91 import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
92 import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
93 import org.graalvm.compiler.lir.amd64.AMD64StringLatin1InflateOp;
94 import org.graalvm.compiler.lir.amd64.AMD64StringUTF16CompressOp;
95 import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
96 import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
97 import org.graalvm.compiler.lir.gen.LIRGenerationResult;
98 import org.graalvm.compiler.lir.gen.LIRGenerator;
99 import org.graalvm.compiler.lir.hashing.Hasher;
100 import org.graalvm.compiler.phases.util.Providers;
101
102 import jdk.vm.ci.amd64.AMD64;
103 import jdk.vm.ci.amd64.AMD64Kind;
104 import jdk.vm.ci.code.CallingConvention;
105 import jdk.vm.ci.code.Register;
106 import jdk.vm.ci.code.RegisterValue;
107 import jdk.vm.ci.code.StackSlot;
108 import jdk.vm.ci.meta.AllocatableValue;
109 import jdk.vm.ci.meta.JavaConstant;
110 import jdk.vm.ci.meta.JavaKind;
111 import jdk.vm.ci.meta.PlatformKind;
112 import jdk.vm.ci.meta.VMConstant;
113 import jdk.vm.ci.meta.Value;
114 import jdk.vm.ci.meta.ValueKind;
115
116 /**
387 }
388 } else {
389 append(new CondSetOp(result, finalCondition.negate()));
390 }
391 } else if (isFloatComparison) {
392 append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(finalTrueValue), load(finalFalseValue)));
393 } else {
394 append(new CondMoveOp(result, finalCondition, load(finalTrueValue), loadNonConst(finalFalseValue)));
395 }
396 return result;
397 }
398
    /**
     * Emits a TEST of {@code left} and {@code right} followed by a conditional move that selects
     * {@code trueValue} when the tested bits are all zero (EQ), otherwise {@code falseValue}.
     *
     * @return a new variable holding the selected value, with the kind of {@code trueValue}
     */
    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        emitIntegerTest(left, right);
        Variable result = newVariable(trueValue.getValueKind());
        // loadNonConst allows the false input to stay a constant; the true input must be loaded.
        append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
        return result;
    }
406
407 private void emitIntegerTest(Value a, Value b) {
408 assert ((AMD64Kind) a.getPlatformKind()).isInteger();
409 OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
410 if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
411 append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
412 } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
413 append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
414 } else if (isAllocatableValue(b)) {
415 append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
416 } else {
417 append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
418 }
419 }
420
421 /**
422 * This method emits the compare against memory instruction, and may reorder the operands. It
423 * returns true if it did so.
424 *
425 * @param b the right operand of the comparison
426 * @return true if the left and right operands were switched, false otherwise
427 */
428 private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
429 OperandSize size;
430 switch (cmpKind) {
431 case BYTE:
432 size = OperandSize.BYTE;
433 break;
434 case WORD:
435 size = OperandSize.WORD;
436 break;
437 case DWORD:
438 size = OperandSize.DWORD;
439 break;
530 Variable result = newVariable(LIRKind.combine(input));
531 append(new AMD64ByteSwapOp(result, input));
532 return result;
533 }
534
535 @Override
536 public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
537 LIRKind resultKind = LIRKind.value(AMD64Kind.DWORD);
538 RegisterValue raxRes = AMD64.rax.asValue(resultKind);
539 RegisterValue cnt1 = AMD64.rcx.asValue(length1.getValueKind());
540 RegisterValue cnt2 = AMD64.rdx.asValue(length2.getValueKind());
541 emitMove(cnt1, length1);
542 emitMove(cnt2, length2);
543 append(new AMD64ArrayCompareToOp(this, kind1, kind2, raxRes, array1, array2, cnt1, cnt2));
544 Variable result = newVariable(resultKind);
545 emitMove(result, raxRes);
546 return result;
547 }
548
    /**
     * Emits the array-equality intrinsic for two arrays of the same element kind.
     *
     * @param constantLength compile-time length if known — presumably negative when unknown; TODO
     *            confirm against AMD64ArrayEqualsOp
     * @return a new DWORD variable holding the comparison result
     */
    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length, int constantLength, boolean directPointers) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind, kind, result, array1, array2, asAllocatable(length), constantLength, directPointers, getMaxVectorSize()));
        return result;
    }
555
    /**
     * Emits the array-equality intrinsic for arrays of two (possibly different) element kinds.
     * Mirrors the single-kind overload but passes both kinds through to the op.
     *
     * @return a new DWORD variable holding the comparison result
     */
    @Override
    public Variable emitArrayEquals(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length, int constantLength, boolean directPointers) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind1, kind2, result, array1, array2, asAllocatable(length), constantLength, directPointers, getMaxVectorSize()));
        return result;
    }
562
    /**
     * Return a conservative estimate of the page size for use by the String.indexOf intrinsic.
     * Defaults to 4096 bytes; subclasses with access to the real VM configuration may override.
     */
    protected int getVMPageSize() {
        return 4096;
    }
569
    /**
     * Return the maximum size of vector registers used in SSE/AVX instructions.
     * A negative value means no limit is imposed; subclasses may override with a VM-provided cap.
     */
    protected int getMaxVectorSize() {
        // default for "unlimited"
        return -1;
    }
577
    /**
     * Emits the array-indexOf intrinsic: searches {@code arrayPointer}, starting at
     * {@code fromIndex} and bounded by {@code arrayLength}, for the given {@code searchValues}.
     *
     * @param findTwoConsecutive if true, searches for two consecutive values — TODO confirm exact
     *            pairing semantics against AMD64ArrayIndexOfOp
     * @return a new DWORD variable holding the resulting index
     */
    @Override
    public Variable emitArrayIndexOf(JavaKind arrayKind, JavaKind valueKind, boolean findTwoConsecutive, Value arrayPointer, Value arrayLength, Value fromIndex, Value... searchValues) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayIndexOfOp(arrayKind, valueKind, findTwoConsecutive, getMaxVectorSize(), this, result,
                        asAllocatable(arrayPointer), asAllocatable(arrayLength), asAllocatable(fromIndex), searchValues));
        return result;
    }
585
586 @Override
587 public void emitStringLatin1Inflate(Value src, Value dst, Value len) {
588 RegisterValue rsrc = AMD64.rsi.asValue(src.getValueKind());
589 RegisterValue rdst = AMD64.rdi.asValue(dst.getValueKind());
590 RegisterValue rlen = AMD64.rdx.asValue(len.getValueKind());
644
    /**
     * Delegates to {@link Hasher#forKeys} to pick a hash function for the given switch keys,
     * or an empty Optional if none is suitable for the requested minimum table density.
     */
    @Override
    protected Optional<Hasher> hasherFor(JavaConstant[] keyConstants, double minDensity) {
        return Hasher.forKeys(keyConstants, minDensity);
    }
649
650 @Override
651 protected void emitHashTableSwitch(Hasher hasher, JavaConstant[] keys, LabelRef defaultTarget, LabelRef[] targets, Value value) {
652 Value index = hasher.hash(value, arithmeticLIRGen);
653 Variable scratch = newVariable(LIRKind.value(target().arch.getWordKind()));
654 Variable entryScratch = newVariable(LIRKind.value(target().arch.getWordKind()));
655 append(new HashTableSwitchOp(keys, defaultTarget, targets, value, index, scratch, entryScratch));
656 }
657
    /** Emits an x86 PAUSE instruction via {@link AMD64PauseOp}. */
    @Override
    public void emitPause() {
        append(new AMD64PauseOp());
    }
662
    /**
     * Creates the AMD64 op that overwrites {@code zappedRegisters} with the corresponding
     * {@code zapValues}.
     */
    @Override
    public SaveRegistersOp createZapRegisters(Register[] zappedRegisters, JavaConstant[] zapValues) {
        return new AMD64ZapRegistersOp(zappedRegisters, zapValues);
    }
667
    /**
     * Creates the AMD64 op that overwrites the given stack slots with the corresponding
     * {@code zapValues}.
     */
    @Override
    public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
        return new AMD64ZapStackOp(zappedStack, zapValues);
    }
672
    /** Emits an LFENCE ({@link AMD64LFenceOp}) as a speculation barrier. */
    @Override
    public void emitSpeculationFence() {
        append(new AMD64LFenceOp());
    }
677 }
|
31 import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
32 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
33 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
34 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
35 import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
36 import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
37 import static org.graalvm.compiler.lir.LIRValueUtil.asConstant;
38 import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
39 import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
40 import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
41 import static org.graalvm.compiler.lir.LIRValueUtil.isIntConstant;
42 import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
43
44 import java.util.Optional;
45
46 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
47 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
48 import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
49 import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
50 import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
51 import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRMOp;
52 import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
53 import org.graalvm.compiler.asm.amd64.AVXKind;
54 import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
55 import org.graalvm.compiler.core.common.LIRKind;
56 import org.graalvm.compiler.core.common.NumUtil;
57 import org.graalvm.compiler.core.common.calc.Condition;
58 import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
59 import org.graalvm.compiler.core.common.spi.LIRKindTool;
60 import org.graalvm.compiler.debug.GraalError;
61 import org.graalvm.compiler.lir.ConstantValue;
62 import org.graalvm.compiler.lir.LIRFrameState;
63 import org.graalvm.compiler.lir.LIRInstruction;
64 import org.graalvm.compiler.lir.LIRValueUtil;
65 import org.graalvm.compiler.lir.LabelRef;
66 import org.graalvm.compiler.lir.StandardOp.JumpOp;
67 import org.graalvm.compiler.lir.StandardOp.ZapRegistersOp;
68 import org.graalvm.compiler.lir.SwitchStrategy;
69 import org.graalvm.compiler.lir.Variable;
70 import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
71 import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
72 import org.graalvm.compiler.lir.amd64.AMD64ArrayCompareToOp;
73 import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
74 import org.graalvm.compiler.lir.amd64.AMD64ArrayIndexOfOp;
75 import org.graalvm.compiler.lir.amd64.AMD64Binary;
76 import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
77 import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
78 import org.graalvm.compiler.lir.amd64.AMD64Call;
79 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow;
80 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
81 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
82 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondSetOp;
83 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
84 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
85 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondSetOp;
86 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.HashTableSwitchOp;
87 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
88 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
89 import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
90 import org.graalvm.compiler.lir.amd64.AMD64LFenceOp;
91 import org.graalvm.compiler.lir.amd64.AMD64Move;
92 import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
93 import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
94 import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
95 import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
96 import org.graalvm.compiler.lir.amd64.AMD64StringLatin1InflateOp;
97 import org.graalvm.compiler.lir.amd64.AMD64StringUTF16CompressOp;
98 import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
99 import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
100 import org.graalvm.compiler.lir.amd64.AMD64ZeroMemoryOp;
101 import org.graalvm.compiler.lir.amd64.vector.AMD64VectorCompareOp;
102 import org.graalvm.compiler.lir.gen.LIRGenerationResult;
103 import org.graalvm.compiler.lir.gen.LIRGenerator;
104 import org.graalvm.compiler.lir.hashing.Hasher;
105 import org.graalvm.compiler.phases.util.Providers;
106
107 import jdk.vm.ci.amd64.AMD64;
108 import jdk.vm.ci.amd64.AMD64Kind;
109 import jdk.vm.ci.code.CallingConvention;
110 import jdk.vm.ci.code.Register;
111 import jdk.vm.ci.code.RegisterValue;
112 import jdk.vm.ci.code.StackSlot;
113 import jdk.vm.ci.meta.AllocatableValue;
114 import jdk.vm.ci.meta.JavaConstant;
115 import jdk.vm.ci.meta.JavaKind;
116 import jdk.vm.ci.meta.PlatformKind;
117 import jdk.vm.ci.meta.VMConstant;
118 import jdk.vm.ci.meta.Value;
119 import jdk.vm.ci.meta.ValueKind;
120
121 /**
392 }
393 } else {
394 append(new CondSetOp(result, finalCondition.negate()));
395 }
396 } else if (isFloatComparison) {
397 append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(finalTrueValue), load(finalFalseValue)));
398 } else {
399 append(new CondMoveOp(result, finalCondition, load(finalTrueValue), loadNonConst(finalFalseValue)));
400 }
401 return result;
402 }
403
    /**
     * Emits a TEST of {@code left} and {@code right} followed by a conditional move that selects
     * {@code trueValue} when the tested bits are all zero (EQ), otherwise {@code falseValue}.
     *
     * @return a new variable holding the selected value, with the kind of {@code trueValue}
     */
    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        emitIntegerTest(left, right);
        Variable result = newVariable(trueValue.getValueKind());
        // loadNonConst allows the false input to stay a constant; the true input must be loaded.
        append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
        return result;
    }
411
412 private static AVXSize getRegisterSize(Value a) {
413 AMD64Kind kind = (AMD64Kind) a.getPlatformKind();
414 if (kind.isXMM()) {
415 return AVXKind.getRegisterSize(kind);
416 } else {
417 return AVXSize.XMM;
418 }
419 }
420
    /**
     * Emits a test of {@code a} and {@code b} that sets the flags for a following EQ/NE
     * conditional. Vector operands use VPTEST; scalar integers use TEST, preferring the
     * immediate form when either operand is a 32-bit-representable constant.
     */
    private void emitIntegerTest(Value a, Value b) {
        if (a.getPlatformKind().getVectorLength() > 1) {
            // Vector case: VPTEST at the register size matching a's kind (XMM for non-XMM kinds).
            append(new AMD64VectorCompareOp(VexRMOp.VPTEST, getRegisterSize(a), asAllocatable(a), asAllocatable(b)));
        } else {
            assert ((AMD64Kind) a.getPlatformKind()).isInteger();
            // Operand width follows the left input: QWORD for 64-bit kinds, DWORD otherwise.
            OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
            if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
                append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
            } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
                // Operands swapped to use the immediate form of TEST.
                append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
            } else if (isAllocatableValue(b)) {
                // Prefer an already-allocatable operand on the left to avoid an extra move.
                append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
            } else {
                append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
            }
        }
    }
438
439 /**
440 * This method emits the compare against memory instruction, and may reorder the operands. It
441 * returns true if it did so.
442 *
443 * @param b the right operand of the comparison
444 * @return true if the left and right operands were switched, false otherwise
445 */
446 private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
447 OperandSize size;
448 switch (cmpKind) {
449 case BYTE:
450 size = OperandSize.BYTE;
451 break;
452 case WORD:
453 size = OperandSize.WORD;
454 break;
455 case DWORD:
456 size = OperandSize.DWORD;
457 break;
548 Variable result = newVariable(LIRKind.combine(input));
549 append(new AMD64ByteSwapOp(result, input));
550 return result;
551 }
552
    /**
     * Emits the array compare-to intrinsic. The generated {@link AMD64ArrayCompareToOp} is wired
     * to fixed registers here: the result is produced in rax, and the two lengths are pinned to
     * rcx and rdx.
     *
     * @return a new DWORD variable holding the comparison result copied out of rax
     */
    @Override
    public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
        LIRKind resultKind = LIRKind.value(AMD64Kind.DWORD);
        RegisterValue raxRes = AMD64.rax.asValue(resultKind);
        RegisterValue cnt1 = AMD64.rcx.asValue(length1.getValueKind());
        RegisterValue cnt2 = AMD64.rdx.asValue(length2.getValueKind());
        emitMove(cnt1, length1);
        emitMove(cnt2, length2);
        append(new AMD64ArrayCompareToOp(this, kind1, kind2, raxRes, array1, array2, cnt1, cnt2));
        // Copy the fixed register into a variable so rax is immediately free for reuse.
        Variable result = newVariable(resultKind);
        emitMove(result, raxRes);
        return result;
    }
566
    /**
     * Emits the array-equality intrinsic for two arrays of the same element kind.
     * NOTE(review): unlike older revisions, {@code length} is passed through without
     * {@code asAllocatable} — presumably AMD64ArrayEqualsOp now accepts any Value; confirm.
     *
     * @return a new DWORD variable holding the comparison result
     */
    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length, boolean directPointers) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind, kind, result, array1, array2, length, directPointers, getMaxVectorSize()));
        return result;
    }
573
    /**
     * Emits the array-equality intrinsic for arrays of two (possibly different) element kinds.
     * Mirrors the single-kind overload but passes both kinds through to the op.
     *
     * @return a new DWORD variable holding the comparison result
     */
    @Override
    public Variable emitArrayEquals(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length, boolean directPointers) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind1, kind2, result, array1, array2, length, directPointers, getMaxVectorSize()));
        return result;
    }
580
    /**
     * Return the maximum size of vector registers used in SSE/AVX instructions.
     * A negative value means no limit is imposed; subclasses may override with a VM-provided cap.
     */
    protected int getMaxVectorSize() {
        // default for "unlimited"
        return -1;
    }
588
    /**
     * Emits the array-indexOf intrinsic: searches {@code arrayPointer}, starting at
     * {@code fromIndex} and bounded by {@code arrayLength}, for the given {@code searchValues}.
     *
     * @param findTwoConsecutive if true, searches for two consecutive values — TODO confirm exact
     *            pairing semantics against AMD64ArrayIndexOfOp
     * @return a new DWORD variable holding the resulting index
     */
    @Override
    public Variable emitArrayIndexOf(JavaKind arrayKind, JavaKind valueKind, boolean findTwoConsecutive, Value arrayPointer, Value arrayLength, Value fromIndex, Value... searchValues) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayIndexOfOp(arrayKind, valueKind, findTwoConsecutive, getMaxVectorSize(), this, result,
                        asAllocatable(arrayPointer), asAllocatable(arrayLength), asAllocatable(fromIndex), searchValues));
        return result;
    }
596
597 @Override
598 public void emitStringLatin1Inflate(Value src, Value dst, Value len) {
599 RegisterValue rsrc = AMD64.rsi.asValue(src.getValueKind());
600 RegisterValue rdst = AMD64.rdi.asValue(dst.getValueKind());
601 RegisterValue rlen = AMD64.rdx.asValue(len.getValueKind());
655
    /**
     * Delegates to {@link Hasher#forKeys} to pick a hash function for the given switch keys,
     * or an empty Optional if none is suitable for the requested minimum table density.
     */
    @Override
    protected Optional<Hasher> hasherFor(JavaConstant[] keyConstants, double minDensity) {
        return Hasher.forKeys(keyConstants, minDensity);
    }
660
    /**
     * Emits a hash-table based switch: hashes {@code value} into a table index and dispatches
     * through a {@link HashTableSwitchOp}, which needs two word-sized scratch variables.
     */
    @Override
    protected void emitHashTableSwitch(Hasher hasher, JavaConstant[] keys, LabelRef defaultTarget, LabelRef[] targets, Value value) {
        Value index = hasher.hash(value, arithmeticLIRGen);
        Variable scratch = newVariable(LIRKind.value(target().arch.getWordKind()));
        Variable entryScratch = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new HashTableSwitchOp(keys, defaultTarget, targets, value, index, scratch, entryScratch));
    }
668
    /** Emits an x86 PAUSE instruction via {@link AMD64PauseOp}. */
    @Override
    public void emitPause() {
        append(new AMD64PauseOp());
    }
673
    /**
     * Creates the AMD64 op that overwrites {@code zappedRegisters} with the corresponding
     * {@code zapValues}.
     */
    @Override
    public ZapRegistersOp createZapRegisters(Register[] zappedRegisters, JavaConstant[] zapValues) {
        return new AMD64ZapRegistersOp(zappedRegisters, zapValues);
    }
678
    /**
     * Creates the AMD64 op that overwrites the given stack slots with the corresponding
     * {@code zapValues}.
     */
    @Override
    public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
        return new AMD64ZapStackOp(zappedStack, zapValues);
    }
683
    /** Emits an LFENCE ({@link AMD64LFenceOp}) as a speculation barrier. */
    @Override
    public void emitSpeculationFence() {
        append(new AMD64LFenceOp());
    }
688
    /**
     * Emits code that zeroes {@code length} bytes of memory starting at {@code address}.
     */
    @Override
    public void emitZeroMemory(Value address, Value length) {
        // AMD64ZeroMemoryOp takes the byte count in rcx (presumably for REP STOS-style zeroing
        // -- TODO confirm), so copy the length into that fixed register first.
        RegisterValue lengthReg = AMD64.rcx.asValue(length.getValueKind());
        emitMove(lengthReg, length);
        append(new AMD64ZeroMemoryOp(asAddressValue(address), lengthReg));
    }
695 }
|