src/cpu/x86/vm/assembler_x86.cpp
rev 10354 : imported patch vextrinscleanup2
rev 10355 : [mq]: vextrinscleanup3
rev 10357 : [mq]: vextrinscleanup5


5594   assert(UseAVX > 0, "requires some form of AVX");
5595   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5596   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5597   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5598   emit_int8((unsigned char)0xEF);
5599   emit_int8((unsigned char)(0xC0 | encode));
5600 }
5601 
5602 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5603   assert(UseAVX > 0, "requires some form of AVX");
5604   InstructionMark im(this);
5605   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5606   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5607   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5608   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5609   emit_int8((unsigned char)0xEF);
5610   emit_operand(dst, src);
5611 }
5612 
5613 
5614 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
5615   assert(VM_Version::supports_avx(), "");

5616   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5617   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5618   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5619   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5620   emit_int8(0x18);
5621   emit_int8((unsigned char)(0xC0 | encode));
5622   // 0x00 - insert into lower 128 bits
5623   // 0x01 - insert into upper 128 bits
5624   emit_int8(0x01);
5625 }
5626 
5627 void Assembler::vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) {
5628   assert(VM_Version::supports_evex(), "");

5629   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5630   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5631   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5632   emit_int8(0x1A);
5633   emit_int8((unsigned char)(0xC0 | encode));
5634   // 0x00 - insert into lower 256 bits
5635   // 0x01 - insert into upper 256 bits
5636   emit_int8(value & 0x01);
5637 }
5638 
5639 void Assembler::vinsertf64x4h(XMMRegister dst, Address src, int value) {
5640   assert(VM_Version::supports_evex(), "");
5641   assert(dst != xnoreg, "sanity");

5642   InstructionMark im(this);
5643   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);

5644   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
5645   // swap src<->dst for encoding
5646   vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5647   emit_int8(0x1A);
5648   emit_operand(dst, src);
5649   // 0x00 - insert into lower 256 bits
5650   // 0x01 - insert into upper 256 bits
5651   emit_int8(value & 0x01);
5652 }
5653 
5654 void Assembler::vinsertf32x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) {
5655   assert(VM_Version::supports_evex(), "");

5656   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5657   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5658   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5659   emit_int8(0x18);
5660   emit_int8((unsigned char)(0xC0 | encode));
5661   // 0x00 - insert into q0 128 bits (0..127)
5662   // 0x01 - insert into q1 128 bits (128..255)
5663   // 0x02 - insert into q2 128 bits (256..383)
5664   // 0x03 - insert into q3 128 bits (384..511)
5665   emit_int8(value & 0x3);
5666 }
5667 
5668 void Assembler::vinsertf32x4h(XMMRegister dst, Address src, int value) {
5669   assert(VM_Version::supports_avx(), "");
5670   assert(dst != xnoreg, "sanity");

5671   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;

5672   InstructionMark im(this);
5673   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5674   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5675   // swap src<->dst for encoding
5676   vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5677   emit_int8(0x18);
5678   emit_operand(dst, src);
5679   // 0x00 - insert into q0 128 bits (0..127)
5680   // 0x01 - insert into q1 128 bits (128..255)
5681   // 0x02 - insert into q2 128 bits (256..383)
5682   // 0x03 - insert into q3 128 bits (384..511)
5683   emit_int8(value & 0x3);
5684 }
5685 
5686 void Assembler::vinsertf128h(XMMRegister dst, Address src) {
5687   assert(VM_Version::supports_avx(), "");
5688   assert(dst != xnoreg, "sanity");

5689   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;

5690   InstructionMark im(this);
5691   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5692   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5693   // swap src<->dst for encoding
5694   vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5695   emit_int8(0x18);
5696   emit_operand(dst, src);

5697   // 0x01 - insert into upper 128 bits
5698   emit_int8(0x01);
5699 }
5700 
5701 void Assembler::vextractf128h(XMMRegister dst, XMMRegister src) {
5702   assert(VM_Version::supports_avx(), "");

5703   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5704   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5705   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5706   emit_int8(0x19);
5707   emit_int8((unsigned char)(0xC0 | encode));
5708   // 0x00 - extract from lower 128 bits
5709   // 0x01 - extract from upper 128 bits
5710   emit_int8(0x01);
5711 }
5712 
5713 void Assembler::vextractf128h(Address dst, XMMRegister src) {
5714   assert(VM_Version::supports_avx(), "");
5715   assert(src != xnoreg, "sanity");

5716   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5717   InstructionMark im(this);
5718   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5719   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5720   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5721   emit_int8(0x19);
5722   emit_operand(src, dst);

5723   // 0x01 - extract from upper 128 bits
5724   emit_int8(0x01);
5725 }
5726 
5727 void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
5728   assert(VM_Version::supports_avx2(), "");

5729   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5730   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5731   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5732   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5733   emit_int8(0x38);
5734   emit_int8((unsigned char)(0xC0 | encode));
5735   // 0x00 - insert into lower 128 bits
5736   // 0x01 - insert into upper 128 bits
5737   emit_int8(0x01);
5738 }
5739 
5740 void Assembler::vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value) {
5741   assert(VM_Version::supports_evex(), "");

5742   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5743   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5744   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5745   emit_int8(0x38);
5746   emit_int8((unsigned char)(0xC0 | encode));
5747   // 0x00 - insert into lower 256 bits
5748   // 0x01 - insert into upper 256 bits
5749   emit_int8(value & 0x01);
5750 }
5751 
5752 void Assembler::vinserti128h(XMMRegister dst, Address src) {
5753   assert(VM_Version::supports_avx2(), "");
5754   assert(dst != xnoreg, "sanity");

5755   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;

5756   InstructionMark im(this);
5757   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5758   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5759   // swap src<->dst for encoding
5760   vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5761   emit_int8(0x38);
5762   emit_operand(dst, src);

5763   // 0x01 - insert into upper 128 bits
5764   emit_int8(0x01);
5765 }
5766 
5767 void Assembler::vextracti128h(XMMRegister dst, XMMRegister src) {
5768   assert(VM_Version::supports_avx(), "");

5769   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5770   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5771   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5772   emit_int8(0x39);
5773   emit_int8((unsigned char)(0xC0 | encode));
5774   // 0x00 - extract from lower 128 bits
5775   // 0x01 - extract from upper 128 bits
5776   emit_int8(0x01);
5777 }
5778 
5779 void Assembler::vextracti128h(Address dst, XMMRegister src) {
5780   assert(VM_Version::supports_avx2(), "");
5781   assert(src != xnoreg, "sanity");

5782   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5783   InstructionMark im(this);
5784   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5785   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5786   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5787   emit_int8(0x39);
5788   emit_operand(src, dst);

5789   // 0x01 - extract from upper 128 bits
5790   emit_int8(0x01);
5791 }
5792 
5793 void Assembler::vextracti64x4h(XMMRegister dst, XMMRegister src, int value) {
5794   assert(VM_Version::supports_evex(), "");

5795   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5796   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5797   emit_int8(0x3B);
5798   emit_int8((unsigned char)(0xC0 | encode));
5799   // 0x00 - extract from lower 256 bits
5800   // 0x01 - extract from upper 256 bits
5801   emit_int8(value & 0x01);
5802 }
5803 
5804 void Assembler::vextracti64x2h(XMMRegister dst, XMMRegister src, int value) {
5805   assert(VM_Version::supports_evex(), "");

5806   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5807   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5808   emit_int8(0x39);
5809   emit_int8((unsigned char)(0xC0 | encode));

5810   // 0x01 - extract from bits 255:128
5811   // 0x02 - extract from bits 383:256
5812   // 0x03 - extract from bits 511:384
5813   emit_int8(value & 0x3);
5814 }
5815 
5816 void Assembler::vextractf64x4h(XMMRegister dst, XMMRegister src, int value) {
5817   assert(VM_Version::supports_evex(), "");

5818   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5819   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5820   emit_int8(0x1B);
5821   emit_int8((unsigned char)(0xC0 | encode));
5822   // 0x00 - extract from lower 256 bits
5823   // 0x01 - extract from upper 256 bits
5824   emit_int8(value & 0x1);
5825 }
5826 
5827 void Assembler::vextractf64x4h(Address dst, XMMRegister src, int value) {
5828   assert(VM_Version::supports_evex(), "");
5829   assert(src != xnoreg, "sanity");

5830   InstructionMark im(this);
5831   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5832   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
5833   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5834   emit_int8(0x1B);
5835   emit_operand(src, dst);
5836   // 0x00 - extract from lower 256 bits
5837   // 0x01 - extract from upper 256 bits
5838   emit_int8(value & 0x01);
5839 }
5840 
5841 void Assembler::vextractf32x4h(XMMRegister dst, XMMRegister src, int value) {
5842   assert(VM_Version::supports_avx(), "");

5843   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5844   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5845   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5846   emit_int8(0x19);
5847   emit_int8((unsigned char)(0xC0 | encode));
5848   // 0x00 - extract from bits 127:0
5849   // 0x01 - extract from bits 255:128
5850   // 0x02 - extract from bits 383:256
5851   // 0x03 - extract from bits 511:384
5852   emit_int8(value & 0x3);
5853 }
5854 
5855 void Assembler::vextractf32x4h(Address dst, XMMRegister src, int value) {
5856   assert(VM_Version::supports_evex(), "");
5857   assert(src != xnoreg, "sanity");

5858   InstructionMark im(this);
5859   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5860   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5861   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5862   emit_int8(0x19);
5863   emit_operand(src, dst);
5864   // 0x00 - extract from bits 127:0
5865   // 0x01 - extract from bits 255:128
5866   // 0x02 - extract from bits 383:256
5867   // 0x03 - extract from bits 511:384
5868   emit_int8(value & 0x3);
5869 }
5870 
5871 void Assembler::vextractf64x2h(XMMRegister dst, XMMRegister src, int value) {
5872   assert(VM_Version::supports_evex(), "");

5873   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5874   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5875   emit_int8(0x19);
5876   emit_int8((unsigned char)(0xC0 | encode));

5877   // 0x01 - extract from bits 255:128
5878   // 0x02 - extract from bits 383:256
5879   // 0x03 - extract from bits 511:384
5880   emit_int8(value & 0x3);
5881 }
5882 
5883 // duplicate 4-bytes integer data from src into 8 locations in dest
5884 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
5885   assert(VM_Version::supports_avx2(), "");
5886   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5887   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5888   emit_int8(0x58);
5889   emit_int8((unsigned char)(0xC0 | encode));
5890 }
5891 
5892 // duplicate 2-bytes integer data from src into 16 locations in dest
5893 void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
5894   assert(VM_Version::supports_avx2(), "");
5895   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5896   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5897   emit_int8(0x79);
5898   emit_int8((unsigned char)(0xC0 | encode));
5899 }
5900 
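
For orientation, a minimal lane-level sketch of what the 128-bit selector byte emitted by these helpers means (register values are illustrative, not part of this webrev):

// ymm1 = [ hi128 | lo128 ]                    (bits 255:128 | bits 127:0)
// vinsertf128  dst, ymm1, xmm2, 0   ->   dst  = [ hi128 | xmm2  ]   // replace lower lane
// vinsertf128  dst, ymm1, xmm2, 1   ->   dst  = [ xmm2  | lo128 ]   // replace upper lane
// vextractf128 xmm0, ymm1, 1        ->   xmm0 = hi128               // read upper lane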




5594   assert(UseAVX > 0, "requires some form of AVX");
5595   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5596   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5597   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5598   emit_int8((unsigned char)0xEF);
5599   emit_int8((unsigned char)(0xC0 | encode));
5600 }
5601 
5602 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5603   assert(UseAVX > 0, "requires some form of AVX");
5604   InstructionMark im(this);
5605   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5606   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5607   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5608   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5609   emit_int8((unsigned char)0xEF);
5610   emit_operand(dst, src);
5611 }
5612 
5613 
5614 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
5615   assert(VM_Version::supports_avx(), "");
5616   assert(imm8 <= 0x01, "imm8: %u", imm8);
5617   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5618   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5619   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5620   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5621   emit_int8(0x18);
5622   emit_int8((unsigned char)(0xC0 | encode));
5623   // 0x00 - insert into lower 128 bits
5624   // 0x01 - insert into upper 128 bits
5625   emit_int8(imm8 & 0x01);
5626 }
5627 
5628 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
5629   assert(VM_Version::supports_evex(), "");
5630   assert(imm8 <= 0x01, "imm8: %u", imm8);
5631   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5632   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5633   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5634   emit_int8(0x1A);
5635   emit_int8((unsigned char)(0xC0 | encode));
5636   // 0x00 - insert into lower 256 bits
5637   // 0x01 - insert into upper 256 bits
5638   emit_int8(imm8 & 0x01);
5639 }
5640 
5641 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
5642   assert(VM_Version::supports_evex(), "");
5643   assert(dst != xnoreg, "sanity");
5644   assert(imm8 <= 0x01, "imm8: %u", imm8);
5645   InstructionMark im(this);
5646   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5647   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5648   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
5649   // swap src<->dst for encoding
5650   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5651   emit_int8(0x1A);
5652   emit_operand(dst, src);
5653   // 0x00 - insert into lower 256 bits
5654   // 0x01 - insert into upper 256 bits
5655   emit_int8(imm8 & 0x01);
5656 }
5657 
5658 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
5659   assert(VM_Version::supports_evex(), "");
5660   assert(imm8 <= 0x03, "imm8: %u", imm8);
5661   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5662   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5663   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5664   emit_int8(0x18);
5665   emit_int8((unsigned char)(0xC0 | encode));
5666   // 0x00 - insert into q0 128 bits (0..127)
5667   // 0x01 - insert into q1 128 bits (128..255)
5668   // 0x02 - insert into q2 128 bits (256..383)
5669   // 0x03 - insert into q3 128 bits (384..511)
5670   emit_int8(imm8 & 0x03);
5671 }
5672 
5673 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
5674   assert(VM_Version::supports_avx(), "");
5675   assert(dst != xnoreg, "sanity");
5676   assert(imm8 <= 0x03, "imm8: %u", imm8);
5677   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5678   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5679   InstructionMark im(this);
5680   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5681   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5682   // swap src<->dst for encoding
5683   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5684   emit_int8(0x18);
5685   emit_operand(dst, src);
5686   // 0x00 - insert into q0 128 bits (0..127)
5687   // 0x01 - insert into q1 128 bits (128..255)
5688   // 0x02 - insert into q2 128 bits (256..383)
5689   // 0x03 - insert into q3 128 bits (384..511)
5690   emit_int8(imm8 & 0x03);
5691 }
5692 
5693 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
5694   assert(VM_Version::supports_avx(), "");
5695   assert(dst != xnoreg, "sanity");
5696   assert(imm8 <= 0x01, "imm8: %u", imm8);
5697   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5698   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5699   InstructionMark im(this);
5700   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5701   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5702   // swap src<->dst for encoding
5703   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5704   emit_int8(0x18);
5705   emit_operand(dst, src);
5706   // 0x00 - insert into lower 128 bits
5707   // 0x01 - insert into upper 128 bits
5708   emit_int8(imm8 & 0x01);
5709 }
5710 
5711 void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
5712   assert(VM_Version::supports_avx(), "");
5713   assert(imm8 <= 0x01, "imm8: %u", imm8);
5714   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5715   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5716   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5717   emit_int8(0x19);
5718   emit_int8((unsigned char)(0xC0 | encode));
5719   // 0x00 - extract from lower 128 bits
5720   // 0x01 - extract from upper 128 bits
5721   emit_int8(imm8 & 0x01);
5722 }
5723 
5724 void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
5725   assert(VM_Version::supports_avx(), "");
5726   assert(src != xnoreg, "sanity");
5727   assert(imm8 <= 0x01, "imm8: %u", imm8);
5728   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5729   InstructionMark im(this);
5730   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5731   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5732   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5733   emit_int8(0x19);
5734   emit_operand(src, dst);
5735   // 0x00 - extract from lower 128 bits
5736   // 0x01 - extract from upper 128 bits
5737   emit_int8(imm8 & 0x01);
5738 }
5739 
5740 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
5741   assert(VM_Version::supports_avx2(), "");
5742   assert(imm8 <= 0x01, "imm8: %u", imm8);
5743   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5744   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5745   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5746   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5747   emit_int8(0x38);
5748   emit_int8((unsigned char)(0xC0 | encode));
5749   // 0x00 - insert into lower 128 bits
5750   // 0x01 - insert into upper 128 bits
5751   emit_int8(imm8 & 0x01);
5752 }
5753 
5754 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
5755   assert(VM_Version::supports_evex(), "");
5756   assert(imm8 <= 0x01, "imm8: %u", imm8);
5757   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5758   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5759   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5760   emit_int8(0x38);
5761   emit_int8((unsigned char)(0xC0 | encode));
5762   // 0x00 - insert into lower 256 bits
5763   // 0x01 - insert into upper 256 bits
5764   emit_int8(imm8 & 0x01);
5765 }
5766 
5767 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
5768   assert(VM_Version::supports_avx2(), "");
5769   assert(dst != xnoreg, "sanity");
5770   assert(imm8 <= 0x01, "imm8: %u", imm8);
5771   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5772   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5773   InstructionMark im(this);
5774   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5775   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5776   // swap src<->dst for encoding
5777   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5778   emit_int8(0x38);
5779   emit_operand(dst, src);
5780   // 0x00 - insert into lower 128 bits
5781   // 0x01 - insert into upper 128 bits
5782   emit_int8(imm8 & 0x01);
5783 }
5784 
5785 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
5786   assert(VM_Version::supports_avx(), "");
5787   assert(imm8 <= 0x01, "imm8: %u", imm8);
5788   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5789   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5790   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5791   emit_int8(0x39);
5792   emit_int8((unsigned char)(0xC0 | encode));
5793   // 0x00 - extract from lower 128 bits
5794   // 0x01 - extract from upper 128 bits
5795   emit_int8(imm8 & 0x01);
5796 }
5797 
5798 void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
5799   assert(VM_Version::supports_avx2(), "");
5800   assert(src != xnoreg, "sanity");
5801   assert(imm8 <= 0x01, "imm8: %u", imm8);
5802   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5803   InstructionMark im(this);
5804   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5805   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5806   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5807   emit_int8(0x39);
5808   emit_operand(src, dst);
5809   // 0x00 - extract from lower 128 bits
5810   // 0x01 - extract from upper 128 bits
5811   emit_int8(imm8 & 0x01);
5812 }
5813 
5814 void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
5815   assert(VM_Version::supports_evex(), "");
5816   assert(imm8 <= 0x01, "imm8: %u", imm8);
5817   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5818   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5819   emit_int8(0x3B);
5820   emit_int8((unsigned char)(0xC0 | encode));
5821   // 0x00 - extract from lower 256 bits
5822   // 0x01 - extract from upper 256 bits
5823   emit_int8(imm8 & 0x01);
5824 }
5825 
5826 void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
5827   assert(VM_Version::supports_evex(), "");
5828   assert(imm8 <= 0x03, "imm8: %u", imm8);
5829   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5830   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5831   emit_int8(0x39);
5832   emit_int8((unsigned char)(0xC0 | encode));
5833   // 0x00 - extract from bits 127:0
5834   // 0x01 - extract from bits 255:128
5835   // 0x02 - extract from bits 383:256
5836   // 0x03 - extract from bits 511:384
5837   emit_int8(imm8 & 0x03);
5838 }
5839 
5840 void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
5841   assert(VM_Version::supports_evex(), "");
5842   assert(imm8 <= 0x01, "imm8: %u", imm8);
5843   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5844   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5845   emit_int8(0x1B);
5846   emit_int8((unsigned char)(0xC0 | encode));
5847   // 0x00 - extract from lower 256 bits
5848   // 0x01 - extract from upper 256 bits
5849   emit_int8(imm8 & 0x01);
5850 }
5851 
5852 void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
5853   assert(VM_Version::supports_evex(), "");
5854   assert(src != xnoreg, "sanity");
5855   assert(imm8 <= 0x01, "imm8: %u", imm8);
5856   InstructionMark im(this);
5857   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5858   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
5859   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5860   emit_int8(0x1B);
5861   emit_operand(src, dst);
5862   // 0x00 - extract from lower 256 bits
5863   // 0x01 - extract from upper 256 bits
5864   emit_int8(imm8 & 0x01);
5865 }
5866 
5867 void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
5868   assert(VM_Version::supports_avx(), "");
5869   assert(imm8 <= 0x03, "imm8: %u", imm8);
5870   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5871   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5872   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5873   emit_int8(0x19);
5874   emit_int8((unsigned char)(0xC0 | encode));
5875   // 0x00 - extract from bits 127:0
5876   // 0x01 - extract from bits 255:128
5877   // 0x02 - extract from bits 383:256
5878   // 0x03 - extract from bits 511:384
5879   emit_int8(imm8 & 0x03);
5880 }
5881 
5882 void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
5883   assert(VM_Version::supports_evex(), "");
5884   assert(src != xnoreg, "sanity");
5885   assert(imm8 <= 0x03, "imm8: %u", imm8);
5886   InstructionMark im(this);
5887   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5888   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5889   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5890   emit_int8(0x19);
5891   emit_operand(src, dst);
5892   // 0x00 - extract from bits 127:0
5893   // 0x01 - extract from bits 255:128
5894   // 0x02 - extract from bits 383:256
5895   // 0x03 - extract from bits 511:384
5896   emit_int8(imm8 & 0x03);
5897 }
5898 
5899 void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
5900   assert(VM_Version::supports_evex(), "");
5901   assert(imm8 <= 0x03, "imm8: %u", imm8);
5902   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5903   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5904   emit_int8(0x19);
5905   emit_int8((unsigned char)(0xC0 | encode));
5906   // 0x00 - extract from bits 127:0
5907   // 0x01 - extract from bits 255:128
5908   // 0x02 - extract from bits 383:256
5909   // 0x03 - extract from bits 511:384
5910   emit_int8(imm8 & 0x03);
5911 }
5912 
5913 // duplicate 4-bytes integer data from src into 8 locations in dest
5914 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
5915   assert(VM_Version::supports_avx2(), "");
5916   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5917   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5918   emit_int8(0x58);
5919   emit_int8((unsigned char)(0xC0 | encode));
5920 }
5921 
5922 // duplicate 2-bytes integer data from src into 16 locations in dest
5923 void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
5924   assert(VM_Version::supports_avx2(), "");
5925   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5926   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5927   emit_int8(0x79);
5928   emit_int8((unsigned char)(0xC0 | encode));
5929 }
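
A lane-level sketch of the two broadcast forms above (values are illustrative; the AVX_256bit attribute selects the 256-bit form):

// vpbroadcastd ymm0, xmm1   ->   ymm0 = [ d d d d d d d d ]                  // d = low 32 bits of xmm1
// vpbroadcastw ymm0, xmm1   ->   ymm0 = [ w w w w w w w w w w w w w w w w ]  // w = low 16 bits of xmm1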
5930 
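
As a usage note, a hypothetical before/after call-site sketch of this cleanup (the MacroAssembler call sites below are assumptions for illustration, not lines from this patch): the old *h helpers hard-coded the upper-half selector, while the renamed entry points take imm8 explicitly and assert its range.

// Before the cleanup (upper 128 bits implied by the 'h' suffix):
//   __ vinsertf128h(xmm0, xmm1, xmm2);
//   __ vextractf128h(Address(rsp, 16), xmm0);
// After the cleanup (selector passed explicitly, range-checked by assert(imm8 <= 0x01, ...)):
//   __ vinsertf128(xmm0, xmm1, xmm2, 1);         // insert into upper 128 bits
//   __ vextractf128(Address(rsp, 16), xmm0, 1);  // extract from upper 128 bits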

