5594 assert(UseAVX > 0, "requires some form of AVX");
5595 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5596 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5597 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5598 emit_int8((unsigned char)0xEF);
5599 emit_int8((unsigned char)(0xC0 | encode));
5600 }
5601
5602 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5603 assert(UseAVX > 0, "requires some form of AVX");
5604 InstructionMark im(this);
5605 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5606 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5607 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5608 vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5609 emit_int8((unsigned char)0xEF);
5610 emit_operand(dst, src);
5611 }
5612
5613
// Emits vinsertf128 inserting src into the upper 128 bits of dst; nds
// supplies the preserved bits (66 0F3A 18 /r, imm8 hard-wired to 0x01).
void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  // 512-bit vector length when EVEX is available, otherwise 256-bit VEX.
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}
5626
// Emits vinsertf64x4 (EVEX.512.66.0F3A.W1 1A /r ib) inserting the 256-bit src
// into the half of dst selected by the low bit of value; nds supplies the rest.
void Assembler::vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1A);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(value & 0x01);
}
5638
// Emits vinsertf64x4 from a 256-bit memory source into the half of dst
// selected by the low bit of value (EVEX.512.66.0F3A.W1 1A /r ib).
void Assembler::vinsertf64x4h(XMMRegister dst, Address src, int value) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1A);
  emit_operand(dst, src);
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(value & 0x01);
}
5653
// Emits vinsertf32x4 (EVEX.512.66.0F3A.W0 18 /r ib) inserting the 128-bit src
// into the quadrant of dst selected by the low two bits of value.
void Assembler::vinsertf32x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(value & 0x3);
}
5667
// Emits vinsertf32x4 (or vinsertf128 when only VEX is available) from a
// 128-bit memory source into the quadrant of dst selected by value & 0x3.
void Assembler::vinsertf32x4h(XMMRegister dst, Address src, int value) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(value & 0x3);
}
5685
// Emits vinsertf128 from a 128-bit memory source into the upper half of dst
// (66 0F3A 18 /r, imm8 hard-wired to 0x01).
void Assembler::vinsertf128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}
5700
// Emits vextractf128 extracting the upper 128 bits of src into dst
// (66 0F3A 19 /r, imm8 hard-wired to 0x01). The destination goes in the
// r/m field for this opcode, hence the src/dst swap in the prefix call.
void Assembler::vextractf128h(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}
5712
// Emits vextractf128 storing the upper 128 bits of src to memory
// (66 0F3A 19 /r, imm8 hard-wired to 0x01).
void Assembler::vextractf128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  assert(src != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}
5726
// Emits vinserti128 (AVX2, 66 0F3A 38 /r) inserting src into the upper
// 128 bits of dst; nds supplies the preserved bits. imm8 hard-wired to 0x01.
void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}
5739
// Emits vinserti64x4 (EVEX.512.66.0F3A.W1 38 /r ib) inserting the 256-bit src
// into the half of dst selected by the low bit of value.
void Assembler::vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(value & 0x01);
}
5751
// Emits vinserti128 (AVX2) from a 128-bit memory source into the upper half
// of dst (66 0F3A 38 /r, imm8 hard-wired to 0x01).
void Assembler::vinserti128h(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x01 - insert into upper 128 bits
  emit_int8(0x01);
}
5766
5767 void Assembler::vextracti128h(XMMRegister dst, XMMRegister src) {
5768 assert(VM_Version::supports_avx(), "");
5769 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5770 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5771 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5772 emit_int8(0x39);
5773 emit_int8((unsigned char)(0xC0 | encode));
5774 // 0x00 - insert into lower 128 bits
5775 // 0x01 - insert into upper 128 bits
5776 emit_int8(0x01);
5777 }
5778
// Emits vextracti128 (AVX2) storing the upper 128 bits of src to memory
// (66 0F3A 39 /r, imm8 hard-wired to 0x01).
void Assembler::vextracti128h(Address dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  assert(src != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x01 - extract from upper 128 bits
  emit_int8(0x01);
}
5792
// Emits vextracti64x4 (EVEX.512.66.0F3A.W1 3B /r ib) extracting the 256-bit
// half of src selected by the low bit of value into dst.
void Assembler::vextracti64x4h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3B);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(value & 0x01);
}
5803
// Emits vextracti64x2 (EVEX 66 0F3A 39 /r ib, W set unless DQ runs in legacy
// mode) extracting the 128-bit lane of src selected by value & 0x3 into dst.
void Assembler::vextracti64x2h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(value & 0x3);
}
5815
// Emits vextractf64x4 (EVEX.512.66.0F3A.W1 1B /r ib) extracting the 256-bit
// half of src selected by the low bit of value into dst.
void Assembler::vextractf64x4h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(value & 0x1);
}
5826
// Emits vextractf64x4 storing the 256-bit half of src selected by the low bit
// of value to memory (EVEX.512.66.0F3A.W1 1B /r ib).
void Assembler::vextractf64x4h(Address dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_operand(src, dst);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(value & 0x01);
}
5840
// Emits vextractf32x4 (or vextractf128 when only VEX is available) extracting
// the 128-bit lane of src selected by value & 0x3 into dst (opcode 0x19).
void Assembler::vextractf32x4h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_avx(), "");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(value & 0x3);
}
5854
// Emits vextractf32x4 storing the 128-bit lane of src selected by value & 0x3
// to memory (EVEX.512.66.0F3A.W0 19 /r ib).
void Assembler::vextractf32x4h(Address dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(value & 0x3);
}
5870
// Emits vextractf64x2 (EVEX 66 0F3A 19 /r ib, W set unless DQ runs in legacy
// mode) extracting the 128-bit lane of src selected by value & 0x3 into dst.
void Assembler::vextractf64x2h(XMMRegister dst, XMMRegister src, int value) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(value & 0x3);
}
5882
5883 // duplicate 4-bytes integer data from src into 8 locations in dest
5884 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
5885 assert(VM_Version::supports_avx2(), "");
5886 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5887 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5888 emit_int8(0x58);
5889 emit_int8((unsigned char)(0xC0 | encode));
5890 }
5891
5892 // duplicate 2-bytes integer data from src into 16 locations in dest
5893 void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
5894 assert(VM_Version::supports_avx2(), "");
5895 InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5896 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5897 emit_int8(0x79);
5898 emit_int8((unsigned char)(0xC0 | encode));
5899 }
5900
|
5594 assert(UseAVX > 0, "requires some form of AVX");
5595 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5596 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5597 int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5598 emit_int8((unsigned char)0xEF);
5599 emit_int8((unsigned char)(0xC0 | encode));
5600 }
5601
// Emits vpxor dst, nds, [src]: bitwise XOR with a memory operand
// (66 0F EF /r); EVEX address scaling uses FV tuple, 32-bit elements.
void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  // Invalid nds encodes as 0 in the vvvv field.
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}
5612
5613
// Emits vinsertf128 (66 0F3A 18 /r ib) inserting the 128-bit src into the
// half of dst selected by the low bit of imm8; nds supplies the other half.
void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}
5626
// Emits vinsertf64x4 (EVEX.512.66.0F3A.W1 1A /r ib) inserting the 256-bit src
// into the half of dst selected by the low bit of imm8.
void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1A);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(imm8 & 0x01);
}
5638
// Emits vinsertf64x4 from a 256-bit memory source into the half of dst
// selected by the low bit of imm8 (EVEX.512.66.0F3A.W1 1A /r ib).
void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1A);
  emit_operand(dst, src);
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(imm8 & 0x01);
}
5654
// Emits vinsertf32x4 (EVEX.512.66.0F3A.W0 18 /r ib) inserting the 128-bit src
// into the quadrant of dst selected by imm8 & 0x3.
void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x3);
}
5668
// Emits vinsertf32x4 (or vinsertf128 when only VEX is available) from a
// 128-bit memory source into the quadrant of dst selected by imm8 & 0x3.
void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x3);
}
5687
// Emits vinsertf128 from a 128-bit memory source into the half of dst
// selected by the low bit of imm8 (66 0F3A 18 /r ib).
void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}
5704
// Emits vextractf128 (66 0F3A 19 /r ib) extracting the 128-bit half of src
// selected by the low bit of imm8 into dst. The destination goes in the r/m
// field for this opcode, hence the src/dst swap in the prefix call.
void Assembler::vextractf128(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}
5716
// Emits vextractf128 storing the 128-bit half of src selected by the low bit
// of imm8 to memory (66 0F3A 19 /r ib).
void Assembler::vextractf128(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(src != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}
5731
// Emits vinserti128 (AVX2, 66 0F3A 38 /r ib) inserting the 128-bit src into
// the half of dst selected by the low bit of imm8.
void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx2(), "");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}
5744
// Emits vinserti64x4 (EVEX.512.66.0F3A.W1 38 /r ib) inserting the 256-bit src
// into the half of dst selected by the low bit of imm8.
void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(imm8 & 0x01);
}
5756
// Emits vinserti128 (AVX2) from a 128-bit memory source into the half of dst
// selected by the low bit of imm8 (66 0F3A 38 /r ib).
void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}
5773
5774 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, int imm8) {
5775 assert(VM_Version::supports_avx(), "");
5776 int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5777 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5778 int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5779 emit_int8(0x39);
5780 emit_int8((unsigned char)(0xC0 | encode));
5781 // 0x00 - extract from lower 128 bits
5782 // 0x01 - extract from upper 128 bits
5783 emit_int8(imm8 & 0x01);
5784 }
5785
// Emits vextracti128 (AVX2) storing the 128-bit half of src selected by the
// low bit of imm8 to memory (66 0F3A 39 /r ib).
void Assembler::vextracti128(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(src != xnoreg, "sanity");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}
5800
// Emits vextracti64x4 (EVEX.512.66.0F3A.W1 3B /r ib) extracting the 256-bit
// half of src selected by the low bit of imm8 into dst.
void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3B);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}
5811
// Emits vextracti64x2 (EVEX 66 0F3A 39 /r ib, W set unless DQ runs in legacy
// mode) extracting the 128-bit lane of src selected by imm8 & 0x3 into dst.
void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x3);
}
5824
// Emits vextractf64x4 (EVEX.512.66.0F3A.W1 1B /r ib) extracting the 256-bit
// half of src selected by the low bit of imm8 into dst.
void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x1);
}
5835
// Emits vextractf64x4 storing the 256-bit half of src selected by the low bit
// of imm8 to memory (EVEX.512.66.0F3A.W1 1B /r ib).
void Assembler::vextractf64x4(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_operand(src, dst);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}
5849
// Emits vextractf32x4 (or vextractf128 when only VEX is available) extracting
// the 128-bit lane of src selected by imm8 & 0x3 into dst (opcode 0x19).
void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_avx(), "");
  int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x3);
}
5863
// Emits vextractf32x4 storing the 128-bit lane of src selected by imm8 & 0x3
// to memory (EVEX.512.66.0F3A.W0 19 /r ib).
void Assembler::vextractf32x4(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x3);
}
5879
// Emits vextractf64x2 (EVEX 66 0F3A 19 /r ib, W set unless DQ runs in legacy
// mode) extracting the 128-bit lane of src selected by imm8 & 0x3 into dst.
void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x3);
}
5892
// duplicate 4-bytes integer data from src into 8 locations in dest
// (vpbroadcastd, VEX.256.66.0F38 58 /r; requires AVX2).
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
5901
// duplicate 2-bytes integer data from src into 16 locations in dest
// (vpbroadcastw, VEX.256.66.0F38 79 /r; requires AVX2).
void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_int8((unsigned char)(0xC0 | encode));
}
5910
|