src/cpu/x86/vm/assembler_x86.cpp

rev 10354 : imported patch vextrinscleanup2
rev 10355 : [mq]: vextrinscleanup3
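The two listings below are the sdiff panes for this file: the pre-patch revision first, then the revision with the vextrinscleanup changes applied. The visible difference is that each vector insert/extract encoder gains a range assert on its imm8 lane selector before the selector is masked into the emitted instruction. As a standalone illustration of the lane numbering the comments use (q0..q3 for the 128-bit granular forms), the sketch below maps each legal imm8 value to the bit range it addresses in a 512-bit register; it is illustrative only and not part of the patch.

#include <cassert>
#include <cstdio>

// Illustrative only -- not part of the patch.  Maps the imm8 lane selector
// used by the 128-bit granular forms (vinsertf32x4, vextractf32x4,
// vextracti64x2, ...) to the bit range it addresses in a 512-bit register,
// mirroring the q0..q3 comments in the listings below.
static void print_lane(int imm8) {
  assert((unsigned int)imm8 <= 0x03);   // same range check the patched revision adds
  int lo = (imm8 & 0x03) * 128;         // same masking the emitters perform
  std::printf("imm8 0x%02x -> bits %d..%d\n", (unsigned)imm8, lo, lo + 127);
}

int main() {
  for (int i = 0; i <= 3; ++i) {
    print_lane(i);                      // q0, q1, q2, q3
  }
  return 0;
}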


5596   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5597   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5598   emit_int8((unsigned char)0xEF);
5599   emit_int8((unsigned char)(0xC0 | encode));
5600 }
5601 
5602 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5603   assert(UseAVX > 0, "requires some form of AVX");
5604   InstructionMark im(this);
5605   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5606   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5607   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5608   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5609   emit_int8((unsigned char)0xEF);
5610   emit_operand(dst, src);
5611 }
5612 
5613 
5614 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5615   assert(VM_Version::supports_avx(), "");

5616   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5617   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5618   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5619   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5620   emit_int8(0x18);
5621   emit_int8((unsigned char)(0xC0 | encode));
5622   // 0x00 - insert into lower 128 bits
5623   // 0x01 - insert into upper 128 bits
5624   emit_int8(imm8 & 0x01);
5625 }
5626 
5627 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5628   assert(VM_Version::supports_evex(), "");

5629   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5630   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5631   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5632   emit_int8(0x1A);
5633   emit_int8((unsigned char)(0xC0 | encode));
5634   // 0x00 - insert into lower 256 bits
5635   // 0x01 - insert into upper 256 bits
5636   emit_int8(imm8 & 0x01);
5637 }
5638 
5639 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
5640   assert(VM_Version::supports_evex(), "");
5641   assert(dst != xnoreg, "sanity");

5642   InstructionMark im(this);
5643   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5644   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5645   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
5646   // swap src<->dst for encoding
5647   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5648   emit_int8(0x1A);
5649   emit_operand(dst, src);
5650   // 0x00 - insert into lower 256 bits
5651   // 0x01 - insert into upper 128 bits
5652   emit_int8(imm8 & 0x01);
5653 }
5654 
5655 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5656   assert(VM_Version::supports_evex(), "");

5657   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5658   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5659   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5660   emit_int8(0x18);
5661   emit_int8((unsigned char)(0xC0 | encode));
5662   // 0x00 - insert into q0 128 bits (0..127)
5663   // 0x01 - insert into q1 128 bits (128..255)
5664   // 0x02 - insert into q2 128 bits (256..383)
5665   // 0x03 - insert into q3 128 bits (384..511)
5666   emit_int8(imm8 & 0x3);
5667 }
5668 
5669 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
5670   assert(VM_Version::supports_avx(), "");
5671   assert(dst != xnoreg, "sanity");

5672   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5673   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5674   InstructionMark im(this);
5675   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5676   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5677   // swap src<->dst for encoding
5678   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5679   emit_int8(0x18);
5680   emit_operand(dst, src);
5681   // 0x00 - insert into q0 128 bits (0..127)
5682   // 0x01 - insert into q1 128 bits (128..255)
5683   // 0x02 - insert into q2 128 bits (256..383)
5684   // 0x03 - insert into q3 128 bits (384..511)
5685   emit_int8(imm8 & 0x3);
5686 }
5687 
5688 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
5689   assert(VM_Version::supports_avx(), "");
5690   assert(dst != xnoreg, "sanity");

5691   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5692   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5693   InstructionMark im(this);
5694   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5695   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5696   // swap src<->dst for encoding
5697   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5698   emit_int8(0x18);
5699   emit_operand(dst, src);
5700   // 0x00 - insert into lower 128 bits
5701   // 0x01 - insert into upper 128 bits
5702   emit_int8(imm8 & 0x01);
5703 }
5704 
5705 void Assembler::vextractf128(XMMRegister dst, XMMRegister src, int imm8) {
5706   assert(VM_Version::supports_avx(), "");

5707   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5708   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5709   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5710   emit_int8(0x19);
5711   emit_int8((unsigned char)(0xC0 | encode));
5712   // 0x00 - extract from lower 128 bits
5713   // 0x01 - extract from upper 128 bits
5714   emit_int8(imm8 & 0x01);
5715 }
5716 
5717 void Assembler::vextractf128(Address dst, XMMRegister src, int imm8) {
5718   assert(VM_Version::supports_avx(), "");
5719   assert(src != xnoreg, "sanity");

5720   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5721   InstructionMark im(this);
5722   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5723   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5724   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5725   emit_int8(0x19);
5726   emit_operand(src, dst);
5727   // 0x00 - extract from lower 128 bits
5728   // 0x01 - extract from upper 128 bits
5729   emit_int8(imm8 & 0x01);
5730 }
5731 
5732 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5733   assert(VM_Version::supports_avx2(), "");

5734   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5735   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5736   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5737   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5738   emit_int8(0x38);
5739   emit_int8((unsigned char)(0xC0 | encode));
5740   // 0x00 - insert into lower 128 bits
5741   // 0x01 - insert into upper 128 bits
5742   emit_int8(imm8 & 0x01);
5743 }
5744 
5745 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5746   assert(VM_Version::supports_evex(), "");

5747   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5748   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5749   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5750   emit_int8(0x38);
5751   emit_int8((unsigned char)(0xC0 | encode));
5752   // 0x00 - insert into lower 256 bits
5753   // 0x01 - insert into upper 256 bits
5754   emit_int8(imm8 & 0x01);
5755 }
5756 
5757 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
5758   assert(VM_Version::supports_avx2(), "");
5759   assert(dst != xnoreg, "sanity");

5760   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5761   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5762   InstructionMark im(this);
5763   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5764   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5765   // swap src<->dst for encoding
5766   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5767   emit_int8(0x38);
5768   emit_operand(dst, src);
5769   // 0x00 - insert into lower 128 bits
5770   // 0x01 - insert into upper 128 bits
5771   emit_int8(imm8 & 0x01);
5772 }
5773 
5774 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, int imm8) {
5775   assert(VM_Version::supports_avx(), "");

5776   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5777   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5778   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5779   emit_int8(0x39);
5780   emit_int8((unsigned char)(0xC0 | encode));
5781   // 0x00 - extract from lower 128 bits
5782   // 0x01 - extract from upper 128 bits
5783   emit_int8(imm8 & 0x01);
5784 }
5785 
5786 void Assembler::vextracti128(Address dst, XMMRegister src, int imm8) {
5787   assert(VM_Version::supports_avx2(), "");
5788   assert(src != xnoreg, "sanity");

5789   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5790   InstructionMark im(this);
5791   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5792   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5793   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5794   emit_int8(0x39);
5795   emit_operand(src, dst);
5796   // 0x00 - extract from lower 128 bits
5797   // 0x01 - extract from upper 128 bits
5798   emit_int8(imm8 & 0x01);
5799 }
5800 
5801 void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, int imm8) {
5802   assert(VM_Version::supports_evex(), "");

5803   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5804   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5805   emit_int8(0x3B);
5806   emit_int8((unsigned char)(0xC0 | encode));
5807   // 0x00 - extract from lower 256 bits
5808   // 0x01 - extract from upper 256 bits
5809   emit_int8(imm8 & 0x01);
5810 }
5811 
5812 void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, int imm8) {
5813   assert(VM_Version::supports_evex(), "");

5814   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5815   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5816   emit_int8(0x39);
5817   emit_int8((unsigned char)(0xC0 | encode));
5818   // 0x00 - extract from bits 127:0
5819   // 0x01 - extract from bits 255:128
5820   // 0x02 - extract from bits 383:256
5821   // 0x03 - extract from bits 511:384
5822   emit_int8(imm8 & 0x3);
5823 }
5824 
5825 void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, int imm8) {
5826   assert(VM_Version::supports_evex(), "");

5827   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5828   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5829   emit_int8(0x1B);
5830   emit_int8((unsigned char)(0xC0 | encode));
5831   // 0x00 - extract from lower 256 bits
5832   // 0x01 - extract from upper 256 bits
5833   emit_int8(imm8 & 0x1);
5834 }
5835 
5836 void Assembler::vextractf64x4(Address dst, XMMRegister src, int imm8) {
5837   assert(VM_Version::supports_evex(), "");
5838   assert(src != xnoreg, "sanity");

5839   InstructionMark im(this);
5840   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5841   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
5842   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5843   emit_int8(0x1B);
5844   emit_operand(src, dst);
5845   // 0x00 - extract from lower 256 bits
5846   // 0x01 - extract from upper 256 bits
5847   emit_int8(imm8 & 0x01);
5848 }
5849 
5850 void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, int imm8) {
5851   assert(VM_Version::supports_avx(), "");

5852   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5853   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5854   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5855   emit_int8(0x19);
5856   emit_int8((unsigned char)(0xC0 | encode));
5857   // 0x00 - extract from bits 127:0
5858   // 0x01 - extract from bits 255:128
5859   // 0x02 - extract from bits 383:256
5860   // 0x03 - extract from bits 511:384
5861   emit_int8(imm8 & 0x3);
5862 }
5863 
5864 void Assembler::vextractf32x4(Address dst, XMMRegister src, int imm8) {
5865   assert(VM_Version::supports_evex(), "");
5866   assert(src != xnoreg, "sanity");

5867   InstructionMark im(this);
5868   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5869   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5870   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5871   emit_int8(0x19);
5872   emit_operand(src, dst);
5873   // 0x00 - extract from bits 127:0
5874   // 0x01 - extract from bits 255:128
5875   // 0x02 - extract from bits 383:256
5876   // 0x03 - extract from bits 511:384
5877   emit_int8(imm8 & 0x3);
5878 }
5879 
5880 void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, int imm8) {
5881   assert(VM_Version::supports_evex(), "");

5882   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5883   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5884   emit_int8(0x19);
5885   emit_int8((unsigned char)(0xC0 | encode));
5886   // 0x00 - extract from bits 127:0
5887   // 0x01 - extract from bits 255:128
5888   // 0x02 - extract from bits 383:256
5889   // 0x03 - extract from bits 511:384
5890   emit_int8(imm8 & 0x3);
5891 }
5892 
5893 // duplicate 4-byte integer data from src into 8 locations in dest
5894 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
5895   assert(VM_Version::supports_avx2(), "");
5896   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5897   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5898   emit_int8(0x58);
5899   emit_int8((unsigned char)(0xC0 | encode));
5900 }
5901 
5902 // duplicate 2-byte integer data from src into 16 locations in dest
5903 void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
5904   assert(VM_Version::supports_avx2(), "");
5905   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5906   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5907   emit_int8(0x79);
5908   emit_int8((unsigned char)(0xC0 | encode));
5909 }
5910 




5596   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5597   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5598   emit_int8((unsigned char)0xEF);
5599   emit_int8((unsigned char)(0xC0 | encode));
5600 }
5601 
5602 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5603   assert(UseAVX > 0, "requires some form of AVX");
5604   InstructionMark im(this);
5605   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5606   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5607   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5608   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5609   emit_int8((unsigned char)0xEF);
5610   emit_operand(dst, src);
5611 }
5612 
5613 
5614 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5615   assert(VM_Version::supports_avx(), "");
5616   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5617   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5618   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5619   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5620   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5621   emit_int8(0x18);
5622   emit_int8((unsigned char)(0xC0 | encode));
5623   // 0x00 - insert into lower 128 bits
5624   // 0x01 - insert into upper 128 bits
5625   emit_int8(imm8 & 0x01);
5626 }
5627 
5628 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5629   assert(VM_Version::supports_evex(), "");
5630   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5631   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5632   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5633   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5634   emit_int8(0x1A);
5635   emit_int8((unsigned char)(0xC0 | encode));
5636   // 0x00 - insert into lower 256 bits
5637   // 0x01 - insert into upper 256 bits
5638   emit_int8(imm8 & 0x01);
5639 }
5640 
5641 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
5642   assert(VM_Version::supports_evex(), "");
5643   assert(dst != xnoreg, "sanity");
5644   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5645   InstructionMark im(this);
5646   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5647   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5648   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
5649   // swap src<->dst for encoding
5650   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5651   emit_int8(0x1A);
5652   emit_operand(dst, src);
5653   // 0x00 - insert into lower 256 bits
5654   // 0x01 - insert into upper 256 bits
5655   emit_int8(imm8 & 0x01);
5656 }
5657 
5658 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5659   assert(VM_Version::supports_evex(), "");
5660   assert((unsigned int)imm8 <= 0x03, "imm8: %d\n", imm8);
5661   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5662   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5663   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5664   emit_int8(0x18);
5665   emit_int8((unsigned char)(0xC0 | encode));
5666   // 0x00 - insert into q0 128 bits (0..127)
5667   // 0x01 - insert into q1 128 bits (128..255)
5668   // 0x02 - insert into q2 128 bits (256..383)
5669   // 0x03 - insert into q3 128 bits (384..511)
5670   emit_int8(imm8 & 0x03);
5671 }
5672 
5673 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
5674   assert(VM_Version::supports_avx(), "");
5675   assert(dst != xnoreg, "sanity");
5676   assert((unsigned int)imm8 <= 0x03, "imm8: %d\n", imm8);
5677   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5678   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5679   InstructionMark im(this);
5680   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5681   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5682   // swap src<->dst for encoding
5683   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5684   emit_int8(0x18);
5685   emit_operand(dst, src);
5686   // 0x00 - insert into q0 128 bits (0..127)
5687   // 0x01 - insert into q1 128 bits (128..255)
5688   // 0x02 - insert into q2 128 bits (256..383)
5689   // 0x03 - insert into q3 128 bits (384..511)
5690   emit_int8(imm8 & 0x03);
5691 }
5692 
5693 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
5694   assert(VM_Version::supports_avx(), "");
5695   assert(dst != xnoreg, "sanity");
5696   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5697   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5698   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5699   InstructionMark im(this);
5700   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5701   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5702   // swap src<->dst for encoding
5703   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5704   emit_int8(0x18);
5705   emit_operand(dst, src);
5706   // 0x00 - insert into lower 128 bits
5707   // 0x01 - insert into upper 128 bits
5708   emit_int8(imm8 & 0x01);
5709 }
5710 
5711 void Assembler::vextractf128(XMMRegister dst, XMMRegister src, int imm8) {
5712   assert(VM_Version::supports_avx(), "");
5713   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5714   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5715   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5716   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5717   emit_int8(0x19);
5718   emit_int8((unsigned char)(0xC0 | encode));
5719   // 0x00 - extract from lower 128 bits
5720   // 0x01 - extract from upper 128 bits
5721   emit_int8(imm8 & 0x01);
5722 }
5723 
5724 void Assembler::vextractf128(Address dst, XMMRegister src, int imm8) {
5725   assert(VM_Version::supports_avx(), "");
5726   assert(src != xnoreg, "sanity");
5727   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5728   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5729   InstructionMark im(this);
5730   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5731   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5732   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5733   emit_int8(0x19);
5734   emit_operand(src, dst);
5735   // 0x00 - extract from lower 128 bits
5736   // 0x01 - extract from upper 128 bits
5737   emit_int8(imm8 & 0x01);
5738 }
5739 
5740 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5741   assert(VM_Version::supports_avx2(), "");
5742   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5743   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5744   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5745   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5746   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5747   emit_int8(0x38);
5748   emit_int8((unsigned char)(0xC0 | encode));
5749   // 0x00 - insert into lower 128 bits
5750   // 0x01 - insert into upper 128 bits
5751   emit_int8(imm8 & 0x01);
5752 }
5753 
5754 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
5755   assert(VM_Version::supports_evex(), "");
5756   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5757   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5758   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5759   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5760   emit_int8(0x38);
5761   emit_int8((unsigned char)(0xC0 | encode));
5762   // 0x00 - insert into lower 256 bits
5763   // 0x01 - insert into upper 256 bits
5764   emit_int8(imm8 & 0x01);
5765 }
5766 
5767 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, int imm8) {
5768   assert(VM_Version::supports_avx2(), "");
5769   assert(dst != xnoreg, "sanity");
5770   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5771   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5772   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
5773   InstructionMark im(this);
5774   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5775   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5776   // swap src<->dst for encoding
5777   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5778   emit_int8(0x38);
5779   emit_operand(dst, src);
5780   // 0x00 - insert into lower 128 bits
5781   // 0x01 - insert into upper 128 bits
5782   emit_int8(imm8 & 0x01);
5783 }
5784 
5785 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, int imm8) {
5786   assert(VM_Version::supports_avx(), "");
5787   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5788   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5789   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5790   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5791   emit_int8(0x39);
5792   emit_int8((unsigned char)(0xC0 | encode));
5793   // 0x00 - extract from lower 128 bits
5794   // 0x01 - extract from upper 128 bits
5795   emit_int8(imm8 & 0x01);
5796 }
5797 
5798 void Assembler::vextracti128(Address dst, XMMRegister src, int imm8) {
5799   assert(VM_Version::supports_avx2(), "");
5800   assert(src != xnoreg, "sanity");
5801   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5802   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5803   InstructionMark im(this);
5804   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5805   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5806   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5807   emit_int8(0x39);
5808   emit_operand(src, dst);
5809   // 0x00 - extract from lower 128 bits
5810   // 0x01 - extract from upper 128 bits
5811   emit_int8(imm8 & 0x01);
5812 }
5813 
5814 void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, int imm8) {
5815   assert(VM_Version::supports_evex(), "");
5816   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5817   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5818   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5819   emit_int8(0x3B);
5820   emit_int8((unsigned char)(0xC0 | encode));
5821   // 0x00 - extract from lower 256 bits
5822   // 0x01 - extract from upper 256 bits
5823   emit_int8(imm8 & 0x01);
5824 }
5825 
5826 void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, int imm8) {
5827   assert(VM_Version::supports_evex(), "");
5828   assert((unsigned int)imm8 <= 0x03, "imm8: %d\n", imm8);
5829   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5830   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5831   emit_int8(0x39);
5832   emit_int8((unsigned char)(0xC0 | encode));
5833   // 0x00 - extract from bits 127:0
5834   // 0x01 - extract from bits 255:128
5835   // 0x02 - extract from bits 383:256
5836   // 0x03 - extract from bits 511:384
5837   emit_int8(imm8 & 0x03);
5838 }
5839 
5840 void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, int imm8) {
5841   assert(VM_Version::supports_evex(), "");
5842   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5843   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5844   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5845   emit_int8(0x1B);
5846   emit_int8((unsigned char)(0xC0 | encode));
5847   // 0x00 - extract from lower 256 bits
5848   // 0x01 - extract from upper 256 bits
5849   emit_int8(imm8 & 0x01);
5850 }
5851 
5852 void Assembler::vextractf64x4(Address dst, XMMRegister src, int imm8) {
5853   assert(VM_Version::supports_evex(), "");
5854   assert(src != xnoreg, "sanity");
5855   assert((unsigned int)imm8 <= 0x01, "imm8: %d\n", imm8);
5856   InstructionMark im(this);
5857   InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5858   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
5859   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5860   emit_int8(0x1B);
5861   emit_operand(src, dst);
5862   // 0x00 - extract from lower 256 bits
5863   // 0x01 - extract from upper 256 bits
5864   emit_int8(imm8 & 0x01);
5865 }
5866 
5867 void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, int imm8) {
5868   assert(VM_Version::supports_avx(), "");
5869   assert((unsigned int)imm8 <= 0x03, "imm8: %d\n", imm8);
5870   int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
5871   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5872   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5873   emit_int8(0x19);
5874   emit_int8((unsigned char)(0xC0 | encode));
5875   // 0x00 - extract from bits 127:0
5876   // 0x01 - extract from bits 255:128
5877   // 0x02 - extract from bits 383:256
5878   // 0x03 - extract from bits 511:384
5879   emit_int8(imm8 & 0x03);
5880 }
5881 
5882 void Assembler::vextractf32x4(Address dst, XMMRegister src, int imm8) {
5883   assert(VM_Version::supports_evex(), "");
5884   assert(src != xnoreg, "sanity");
5885   assert((unsigned int)imm8 <= 0x03, "imm8: %d\n", imm8);
5886   InstructionMark im(this);
5887   InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5888   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
5889   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5890   emit_int8(0x19);
5891   emit_operand(src, dst);
5892   // 0x00 - extract from bits 127:0
5893   // 0x01 - extract from bits 255:128
5894   // 0x02 - extract from bits 383:256
5895   // 0x03 - extract from bits 511:384
5896   emit_int8(imm8 & 0x03);
5897 }
5898 
5899 void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, int imm8) {
5900   assert(VM_Version::supports_evex(), "");
5901   assert((unsigned int)imm8 <= 0x03, "imm8: %d\n", imm8);
5902   InstructionAttr attributes(AVX_512bit, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
5903   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
5904   emit_int8(0x19);
5905   emit_int8((unsigned char)(0xC0 | encode));
5906   // 0x00 - extract from bits 127:0
5907   // 0x01 - extract from bits 255:128
5908   // 0x02 - extract from bits 383:256
5909   // 0x03 - extract from bits 511:384
5910   emit_int8(imm8 & 0x03);
5911 }
5912 
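As a caller-side illustration of how the insert/extract encoders above compose (a hedged sketch, not code from this patch; the helper name, registers, and stack slot are hypothetical), a 256-bit value can be assembled from two 128-bit halves and its upper half written back to memory:

// Hypothetical helper -- illustrative use of the encoders defined above.
void combine_and_spill(Assembler* masm, XMMRegister lo, XMMRegister hi,
                       XMMRegister tmp, Address upper_slot) {
  // imm8 0x01 selects the upper 128 bits in both calls (see the comments above).
  masm->vinsertf128(tmp, lo, hi, 0x01);       // tmp = lo with 'hi' as its upper half
  masm->vextractf128(upper_slot, tmp, 0x01);  // store tmp's upper 128 bits
}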
5913 // duplicate 4-byte integer data from src into 8 locations in dest
5914 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
5915   assert(VM_Version::supports_avx2(), "");
5916   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
5917   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5918   emit_int8(0x58);
5919   emit_int8((unsigned char)(0xC0 | encode));
5920 }
5921 
5922 // duplicate 2-byte integer data from src into 16 locations in dest
5923 void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
5924   assert(VM_Version::supports_avx2(), "");
5925   InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
5926   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5927   emit_int8(0x79);
5928   emit_int8((unsigned char)(0xC0 | encode));
5929 }
5930 
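The two broadcast encoders at the end of the listing duplicate one source lane across the whole destination. A minimal, self-contained model of what vpbroadcastd produces on a 256-bit destination (illustrative only, not how the JIT emits or executes the instruction):

#include <array>
#include <cstdint>
#include <cstdio>

// Model of vpbroadcastd with a 256-bit destination: the low 32-bit lane of
// src is copied into all 8 lanes of dst, matching the comment above the
// encoder ("duplicate 4-byte integer data from src into 8 locations").
static std::array<uint32_t, 8> broadcast_d(uint32_t src_lane0) {
  std::array<uint32_t, 8> dst;
  dst.fill(src_lane0);
  return dst;
}

int main() {
  for (uint32_t lane : broadcast_d(0x12345678u)) {
    std::printf("%08x ", lane);
  }
  std::printf("\n");
  return 0;
}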

