
src/hotspot/cpu/x86/assembler_x86.cpp

rev 58404 : 8241042: x86_64: Improve Assembler generation
Reviewed-by: vlivanov
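
Editorial note on the pattern in the hunks below: the change folds runs of consecutive emit_int8() calls into single emit_int16()/emit_int24()/emit_int32() calls that take the individual bytes as arguments. The helper definitions are not part of this page; the toy sketch below only illustrates the byte-ordering contract the rewritten call sites appear to rely on (arguments are appended to the code buffer in order, first argument first). ToyBuffer and main() are hypothetical and exist only for this illustration, they are not HotSpot code.

// Toy illustration only -- ToyBuffer is hypothetical and mimics the assumed
// contract that emit_int16/emit_int24/emit_int32(b1, b2, ...) append b1 first,
// then b2, and so on. The single-argument emit_int32(imm32) form used for
// 32-bit immediates in the hunks below is a different overload, not modeled here.
#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyBuffer {
  std::vector<uint8_t> code;
  void emit_int8(int b)                           { code.push_back((uint8_t)b); }
  void emit_int16(int b1, int b2)                 { emit_int8(b1); emit_int8(b2); }
  void emit_int24(int b1, int b2, int b3)         { emit_int8(b1); emit_int8(b2); emit_int8(b3); }
  void emit_int32(int b1, int b2, int b3, int b4) { emit_int8(b1); emit_int8(b2); emit_int8(b3); emit_int8(b4); }
};

int main() {
  ToyBuffer buf;
  // Same bytes as the rewritten cpuid() below: 0F A2
  buf.emit_int16(0x0F, (unsigned char)0xA2);
  for (uint8_t b : buf.code) printf("%02X ", b);   // prints: 0F A2
  printf("\n");
  return 0;
}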

*** 246,283 **** void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { assert(dst->has_byte_register(), "must have byte register"); assert(isByte(op1) && isByte(op2), "wrong opcode"); assert(isByte(imm8), "not a byte"); assert((op1 & 0x01) == 0, "should be 8bit operation"); ! emit_int8(op1); ! emit_int8(op2 | encode(dst)); ! emit_int8(imm8); } void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) { assert(isByte(op1) && isByte(op2), "wrong opcode"); assert((op1 & 0x01) == 1, "should be 32bit operation"); assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); if (is8bit(imm32)) { ! emit_int8(op1 | 0x02); // set sign bit ! emit_int8(op2 | encode(dst)); ! emit_int8(imm32 & 0xFF); } else { ! emit_int8(op1); ! emit_int8(op2 | encode(dst)); emit_int32(imm32); } } // Force generation of a 4 byte immediate value even if it fits into 8bit void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) { assert(isByte(op1) && isByte(op2), "wrong opcode"); assert((op1 & 0x01) == 1, "should be 32bit operation"); assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); ! emit_int8(op1); ! emit_int8(op2 | encode(dst)); emit_int32(imm32); } // immediate-to-memory forms void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) { --- 246,278 ---- void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) { assert(dst->has_byte_register(), "must have byte register"); assert(isByte(op1) && isByte(op2), "wrong opcode"); assert(isByte(imm8), "not a byte"); assert((op1 & 0x01) == 0, "should be 8bit operation"); ! emit_int24(op1, op2 | encode(dst), imm8); } void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) { assert(isByte(op1) && isByte(op2), "wrong opcode"); assert((op1 & 0x01) == 1, "should be 32bit operation"); assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); if (is8bit(imm32)) { ! // set sign bit ! emit_int24(op1 | 0x02, op2 | encode(dst), imm32 & 0xFF); } else { ! emit_int16(op1, op2 | encode(dst)); emit_int32(imm32); } } // Force generation of a 4 byte immediate value even if it fits into 8bit void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) { assert(isByte(op1) && isByte(op2), "wrong opcode"); assert((op1 & 0x01) == 1, "should be 32bit operation"); assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); ! emit_int16(op1, op2 | encode(dst)); emit_int32(imm32); } // immediate-to-memory forms void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
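
A concrete (hypothetical) instance of the imm8 path in emit_arith() above, assuming the usual opcode pair (0x81, 0xC0) that addl(Register, int32_t) is believed to pass and encode(ecx) == 1 (neither is shown in this hunk): add ecx, 8 becomes 0x81|0x02 = 0x83, 0xC0|1 = 0xC1 and the byte 0x08, i.e. 83 C1 08. A stand-alone recomputation:

// Hypothetical recomputation of emit_arith()'s imm8 branch for "add ecx, 8".
// The opcode pair (0x81, 0xC0) and encode(ecx) == 1 are assumptions for this
// sketch; only the byte arithmetic mirrors the code above.
#include <cstdio>

int main() {
  int op1 = 0x81, op2 = 0xC0;        // assumed arith opcode pair for addl
  int enc_ecx = 1;                   // assumed register encoding of ecx
  int imm32 = 8;                     // fits into 8 bits -> short form
  unsigned char b1 = op1 | 0x02;     // set the sign-extension bit -> 0x83
  unsigned char b2 = op2 | enc_ecx;  // ModRM byte -> 0xC1
  unsigned char b3 = imm32 & 0xFF;   // 0x08
  printf("%02X %02X %02X\n", b1, b2, b3);   // prints: 83 C1 08
  return 0;
}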
*** 295,306 **** } void Assembler::emit_arith(int op1, int op2, Register dst, Register src) { assert(isByte(op1) && isByte(op2), "wrong opcode"); ! emit_int8(op1); ! emit_int8(op2 | encode(dst) << 3 | encode(src)); } bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len, int cur_tuple_type, int in_size_in_bits, int cur_encoding) { --- 290,300 ---- } void Assembler::emit_arith(int op1, int op2, Register dst, Register src) { assert(isByte(op1) && isByte(op2), "wrong opcode"); ! emit_int16(op1, op2 | encode(dst) << 3 | encode(src)); } bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len, int cur_tuple_type, int in_size_in_bits, int cur_encoding) {
*** 478,554 **** void Assembler::emit_operand(Register reg, Register base, Register index, Address::ScaleFactor scale, int disp, RelocationHolder const& rspec, int rip_relative_correction) { ! relocInfo::relocType rtype = rspec.type(); // Encode the registers as needed in the fields they are used in - int regenc = encode(reg) << 3; - int indexenc = index->is_valid() ? encode(index) << 3 : 0; - int baseenc = base->is_valid() ? encode(base) : 0; - if (base->is_valid()) { if (index->is_valid()) { assert(scale != Address::no_scale, "inconsistent address"); // [base + index*scale + disp] ! if (disp == 0 && rtype == relocInfo::none && base != rbp LP64_ONLY(&& base != r13)) { // [base + index*scale] // [00 reg 100][ss index base] assert(index != rsp, "illegal addressing mode"); ! emit_int8(0x04 | regenc); ! emit_int8(scale << 6 | indexenc | baseenc); ! } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { // [base + index*scale + imm8] // [01 reg 100][ss index base] imm8 assert(index != rsp, "illegal addressing mode"); ! emit_int8(0x44 | regenc); ! emit_int8(scale << 6 | indexenc | baseenc); ! emit_int8(disp & 0xFF); } else { // [base + index*scale + disp32] // [10 reg 100][ss index base] disp32 assert(index != rsp, "illegal addressing mode"); ! emit_int8(0x84 | regenc); ! emit_int8(scale << 6 | indexenc | baseenc); emit_data(disp, rspec, disp32_operand); } } else if (base == rsp LP64_ONLY(|| base == r12)) { // [rsp + disp] ! if (disp == 0 && rtype == relocInfo::none) { // [rsp] // [00 reg 100][00 100 100] ! emit_int8(0x04 | regenc); ! emit_int8(0x24); ! } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { // [rsp + imm8] // [01 reg 100][00 100 100] disp8 ! emit_int8(0x44 | regenc); ! emit_int8(0x24); ! emit_int8(disp & 0xFF); } else { // [rsp + imm32] // [10 reg 100][00 100 100] disp32 ! emit_int8(0x84 | regenc); ! emit_int8(0x24); emit_data(disp, rspec, disp32_operand); } } else { // [base + disp] assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode"); ! if (disp == 0 && rtype == relocInfo::none && base != rbp LP64_ONLY(&& base != r13)) { // [base] // [00 reg base] emit_int8(0x00 | regenc | baseenc); ! } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) { // [base + disp8] // [01 reg base] disp8 ! emit_int8(0x40 | regenc | baseenc); ! emit_int8(disp & 0xFF); } else { // [base + disp32] // [10 reg base] disp32 emit_int8(0x80 | regenc | baseenc); emit_data(disp, rspec, disp32_operand); --- 472,537 ---- void Assembler::emit_operand(Register reg, Register base, Register index, Address::ScaleFactor scale, int disp, RelocationHolder const& rspec, int rip_relative_correction) { ! bool no_relocation = rspec.type() == relocInfo::none; // Encode the registers as needed in the fields they are used in int regenc = encode(reg) << 3; if (base->is_valid()) { + int baseenc = encode(base); if (index->is_valid()) { assert(scale != Address::no_scale, "inconsistent address"); // [base + index*scale + disp] ! int indexenc = encode(index) << 3; ! if (disp == 0 && no_relocation && base != rbp LP64_ONLY(&& base != r13)) { // [base + index*scale] // [00 reg 100][ss index base] assert(index != rsp, "illegal addressing mode"); ! emit_int16(0x04 | regenc, scale << 6 | indexenc | baseenc); ! } else if (emit_compressed_disp_byte(disp) && no_relocation) { // [base + index*scale + imm8] // [01 reg 100][ss index base] imm8 assert(index != rsp, "illegal addressing mode"); ! emit_int24(0x44 | regenc, scale << 6 | indexenc | baseenc, disp & 0xFF); } else { // [base + index*scale + disp32] // [10 reg 100][ss index base] disp32 assert(index != rsp, "illegal addressing mode"); ! emit_int16(0x84 | regenc, scale << 6 | indexenc | baseenc); emit_data(disp, rspec, disp32_operand); } } else if (base == rsp LP64_ONLY(|| base == r12)) { // [rsp + disp] ! if (disp == 0 && no_relocation) { // [rsp] // [00 reg 100][00 100 100] ! emit_int16(0x04 | regenc, 0x24); ! } else if (emit_compressed_disp_byte(disp) && no_relocation) { // [rsp + imm8] // [01 reg 100][00 100 100] disp8 ! emit_int24(0x44 | regenc, 0x24, disp & 0xFF); } else { // [rsp + imm32] // [10 reg 100][00 100 100] disp32 ! emit_int16(0x84 | regenc, 0x24); emit_data(disp, rspec, disp32_operand); } } else { // [base + disp] assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode"); ! if (disp == 0 && no_relocation && base != rbp LP64_ONLY(&& base != r13)) { // [base] // [00 reg base] emit_int8(0x00 | regenc | baseenc); ! } else if (emit_compressed_disp_byte(disp) && no_relocation) { // [base + disp8] // [01 reg base] disp8 ! emit_int16(0x40 | regenc | baseenc, disp & 0xFF); } else { // [base + disp32] // [10 reg base] disp32 emit_int8(0x80 | regenc | baseenc); emit_data(disp, rspec, disp32_operand);
*** 558,571 **** if (index->is_valid()) { assert(scale != Address::no_scale, "inconsistent address"); // [index*scale + disp] // [00 reg 100][ss index 101] disp32 assert(index != rsp, "illegal addressing mode"); ! emit_int8(0x04 | regenc); ! emit_int8(scale << 6 | indexenc | 0x05); emit_data(disp, rspec, disp32_operand); ! } else if (rtype != relocInfo::none ) { // [disp] (64bit) RIP-RELATIVE (32bit) abs // [00 000 101] disp32 emit_int8(0x05 | regenc); // Note that the RIP-rel. correction applies to the generated --- 541,553 ---- if (index->is_valid()) { assert(scale != Address::no_scale, "inconsistent address"); // [index*scale + disp] // [00 reg 100][ss index 101] disp32 assert(index != rsp, "illegal addressing mode"); ! emit_int16(0x04 | regenc, scale << 6 | (encode(index) << 3) | 0x05); emit_data(disp, rspec, disp32_operand); ! } else if (!no_relocation) { // [disp] (64bit) RIP-RELATIVE (32bit) abs // [00 000 101] disp32 emit_int8(0x05 | regenc); // Note that the RIP-rel. correction applies to the generated
*** 585,596 **** } else { // 32bit never did this, did everything as the rip-rel/disp code above // [disp] ABSOLUTE // [00 reg 100][00 100 101] disp32 ! emit_int8(0x04 | regenc); ! emit_int8(0x25); emit_data(disp, rspec, disp32_operand); } } } --- 567,577 ---- } else { // 32bit never did this, did everything as the rip-rel/disp code above // [disp] ABSOLUTE // [00 reg 100][00 100 101] disp32 ! emit_int16(0x04 | regenc, 0x25); emit_data(disp, rspec, disp32_operand); } } }
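
A worked example (hypothetical operand, not taken from the patch) for the [rsp + imm8] branch of emit_operand() above: with rax in the reg field (regenc == 0) and the address [rsp + 8], the branch emits ModRM 0x44 ([01 reg 100]), SIB 0x24 ([00 100 100]) and the 8-bit displacement, i.e. the bytes 44 24 08. The stand-alone snippet below just recomputes those bytes; its names are local to the sketch.

// Stand-alone recomputation of the "[rsp + imm8]" case; only the bit layout
// mirrors the emit_operand() branch above.
#include <cstdio>

int main() {
  int regenc = 0 << 3;                   // rax in the ModRM reg field
  int disp   = 8;                        // fits in a signed byte
  unsigned char modrm = 0x44 | regenc;   // [01 reg 100] -> SIB byte follows
  unsigned char sib   = 0x24;            // [00 100 100] -> no index, base = rsp
  unsigned char disp8 = disp & 0xFF;
  printf("%02X %02X %02X\n", modrm, sib, disp8);   // prints: 44 24 08
  return 0;
}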
*** 1146,1157 **** void Assembler::emit_farith(int b1, int b2, int i) { assert(isByte(b1) && isByte(b2), "wrong opcode"); assert(0 <= i && i < 8, "illegal stack offset"); ! emit_int8(b1); ! emit_int8(b2 + i); } // Now the Assembler instructions (identical for 32/64 bits) --- 1127,1137 ---- void Assembler::emit_farith(int b1, int b2, int i) { assert(isByte(b1) && isByte(b2), "wrong opcode"); assert(0 <= i && i < 8, "illegal stack offset"); ! emit_int16(b1, b2 + i); } // Now the Assembler instructions (identical for 32/64 bits)
*** 1233,1286 **** } void Assembler::addr_nop_4() { assert(UseAddressNop, "no CPU support"); // 4 bytes: NOP DWORD PTR [EAX+0] ! emit_int8(0x0F); ! emit_int8(0x1F); ! emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc); ! emit_int8(0); // 8-bits offset (1 byte) } void Assembler::addr_nop_5() { assert(UseAddressNop, "no CPU support"); // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset ! emit_int8(0x0F); ! emit_int8(0x1F); ! emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4); ! emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); emit_int8(0); // 8-bits offset (1 byte) } void Assembler::addr_nop_7() { assert(UseAddressNop, "no CPU support"); // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset ! emit_int8(0x0F); ! emit_int8(0x1F); ! emit_int8((unsigned char)0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); emit_int32(0); // 32-bits offset (4 bytes) } void Assembler::addr_nop_8() { assert(UseAddressNop, "no CPU support"); // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset ! emit_int8(0x0F); ! emit_int8(0x1F); ! emit_int8((unsigned char)0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4); ! emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); emit_int32(0); // 32-bits offset (4 bytes) } void Assembler::addsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x58); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::addsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 1213,1265 ---- } void Assembler::addr_nop_4() { assert(UseAddressNop, "no CPU support"); // 4 bytes: NOP DWORD PTR [EAX+0] ! emit_int32(0x0F, ! 0x1F, ! 0x40, // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc); ! 0); // 8-bits offset (1 byte) } void Assembler::addr_nop_5() { assert(UseAddressNop, "no CPU support"); // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset ! emit_int32(0x0F, ! 0x1F, ! 0x44, // emit_rm(cbuf, 0x1, EAX_enc, 0x4); ! 0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); emit_int8(0); // 8-bits offset (1 byte) } void Assembler::addr_nop_7() { assert(UseAddressNop, "no CPU support"); // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset ! emit_int24(0x0F, ! 0x1F, ! (unsigned char)0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc); emit_int32(0); // 32-bits offset (4 bytes) } void Assembler::addr_nop_8() { assert(UseAddressNop, "no CPU support"); // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset ! emit_int32(0x0F, ! 0x1F, ! (unsigned char)0x84, // emit_rm(cbuf, 0x2, EAX_enc, 0x4); ! 0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc); emit_int32(0); // 32-bits offset (4 bytes) } void Assembler::addsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x58, (unsigned char)(0xC0 | encode)); } void Assembler::addsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 1294,1305 **** void Assembler::addss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x58); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::addss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); --- 1273,1283 ---- void Assembler::addss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x58, (unsigned char)(0xC0 | encode)); } void Assembler::addss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this);
*** 1321,1341 **** void Assembler::aesdec(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_aes(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xDE); ! emit_int8(0xC0 | encode); } void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vaes(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xDE); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::aesdeclast(XMMRegister dst, Address src) { assert(VM_Version::supports_aes(), ""); --- 1299,1317 ---- void Assembler::aesdec(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_aes(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xDE, 0xC0 | encode); } void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vaes(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xDE, (unsigned char)(0xC0 | encode)); } void Assembler::aesdeclast(XMMRegister dst, Address src) { assert(VM_Version::supports_aes(), "");
*** 1348,1368 **** void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_aes(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xDF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vaes(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xDF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::aesenc(XMMRegister dst, Address src) { assert(VM_Version::supports_aes(), ""); InstructionMark im(this); --- 1324,1342 ---- void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_aes(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xDF, (unsigned char)(0xC0 | encode)); } void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vaes(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xDF, (unsigned char)(0xC0 | encode)); } void Assembler::aesenc(XMMRegister dst, Address src) { assert(VM_Version::supports_aes(), ""); InstructionMark im(this);
*** 1374,1394 **** void Assembler::aesenc(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_aes(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xDC); ! emit_int8(0xC0 | encode); } void Assembler::vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xDC); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::aesenclast(XMMRegister dst, Address src) { assert(VM_Version::supports_aes(), ""); InstructionMark im(this); --- 1348,1366 ---- void Assembler::aesenc(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_aes(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xDC, 0xC0 | encode); } void Assembler::vaesenc(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xDC, (unsigned char)(0xC0 | encode)); } void Assembler::aesenclast(XMMRegister dst, Address src) { assert(VM_Version::supports_aes(), ""); InstructionMark im(this);
*** 1400,1420 **** void Assembler::aesenclast(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_aes(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xDD); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xDD); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::andl(Address dst, int32_t imm32) { InstructionMark im(this); prefix(dst); --- 1372,1390 ---- void Assembler::aesenclast(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_aes(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xDD, (unsigned char)(0xC0 | encode)); } void Assembler::vaesenclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vaes(), "requires vaes support/enabling"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xDD, (unsigned char)(0xC0 | encode)); } void Assembler::andl(Address dst, int32_t imm32) { InstructionMark im(this); prefix(dst);
*** 1442,1453 **** void Assembler::andnl(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF2); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::andnl(Register dst, Register src1, Address src2) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this); --- 1412,1422 ---- void Assembler::andnl(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF2, (unsigned char)(0xC0 | encode)); } void Assembler::andnl(Register dst, Register src1, Address src2) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this);
*** 1457,1490 **** emit_operand(dst, src2); } void Assembler::bsfl(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBC); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::bsrl(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBD); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::bswapl(Register reg) { // bswap int encode = prefix_and_encode(reg->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)(0xC8 | encode)); } void Assembler::blsil(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF3); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::blsil(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this); --- 1426,1453 ---- emit_operand(dst, src2); } void Assembler::bsfl(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBC, (unsigned char)(0xC0 | encode)); } void Assembler::bsrl(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBD, (unsigned char)(0xC0 | encode)); } void Assembler::bswapl(Register reg) { // bswap int encode = prefix_and_encode(reg->encoding()); ! emit_int16(0x0F, (unsigned char)(0xC8 | encode)); } void Assembler::blsil(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode)); } void Assembler::blsil(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this);
*** 1496,1507 **** void Assembler::blsmskl(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF3); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::blsmskl(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this); --- 1459,1469 ---- void Assembler::blsmskl(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode)); } void Assembler::blsmskl(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this);
*** 1513,1524 **** void Assembler::blsrl(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF3); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::blsrl(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this); --- 1475,1485 ---- void Assembler::blsrl(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode)); } void Assembler::blsrl(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this);
*** 1550,1561 **** } } void Assembler::call(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xFF); ! emit_int8((unsigned char)(0xD0 | encode)); } void Assembler::call(Address adr) { InstructionMark im(this); --- 1511,1521 ---- } } void Assembler::call(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xFF, (unsigned char)(0xD0 | encode)); } void Assembler::call(Address adr) { InstructionMark im(this);
*** 1586,1606 **** } void Assembler::cmovl(Condition cc, Register dst, Register src) { NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8(0x40 | cc); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cmovl(Condition cc, Register dst, Address src) { NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); prefix(src, dst); ! emit_int8(0x0F); ! emit_int8(0x40 | cc); emit_operand(dst, src); } void Assembler::cmpb(Address dst, int imm8) { InstructionMark im(this); --- 1546,1563 ---- } void Assembler::cmovl(Condition cc, Register dst, Register src) { NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, 0x40 | cc, (unsigned char)(0xC0 | encode)); } void Assembler::cmovl(Condition cc, Register dst, Address src) { NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction")); prefix(src, dst); ! emit_int16(0x0F, 0x40 | cc); emit_operand(dst, src); } void Assembler::cmpb(Address dst, int imm8) { InstructionMark im(this);
*** 1636,1670 **** } void Assembler::cmpw(Address dst, int imm16) { InstructionMark im(this); assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers"); ! emit_int8(0x66); ! emit_int8((unsigned char)0x81); emit_operand(rdi, dst, 2); emit_int16(imm16); } // The 32-bit cmpxchg compares the value at adr with the contents of rax, // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. // The ZF is set if the compared values were equal, and cleared otherwise. void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg InstructionMark im(this); prefix(adr, reg); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB1); emit_operand(reg, adr); } // The 8-bit cmpxchg compares the value at adr with the contents of rax, // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. // The ZF is set if the compared values were equal, and cleared otherwise. void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg InstructionMark im(this); prefix(adr, reg, true); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB0); emit_operand(reg, adr); } void Assembler::comisd(XMMRegister dst, Address src) { // NOTE: dbx seems to decode this as comiss even though the --- 1593,1624 ---- } void Assembler::cmpw(Address dst, int imm16) { InstructionMark im(this); assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers"); ! emit_int16(0x66, (unsigned char)0x81); emit_operand(rdi, dst, 2); emit_int16(imm16); } // The 32-bit cmpxchg compares the value at adr with the contents of rax, // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. // The ZF is set if the compared values were equal, and cleared otherwise. void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg InstructionMark im(this); prefix(adr, reg); ! emit_int16(0x0F, (unsigned char)0xB1); emit_operand(reg, adr); } // The 8-bit cmpxchg compares the value at adr with the contents of rax, // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. // The ZF is set if the compared values were equal, and cleared otherwise. void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg InstructionMark im(this); prefix(adr, reg, true); ! emit_int16(0x0F, (unsigned char)0xB0); emit_operand(reg, adr); } void Assembler::comisd(XMMRegister dst, Address src) { // NOTE: dbx seems to decode this as comiss even though the
*** 1682,1693 **** void Assembler::comisd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::comiss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); --- 1636,1646 ---- void Assembler::comisd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2F, (unsigned char)(0xC0 | encode)); } void Assembler::comiss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this);
*** 1700,1716 **** void Assembler::comiss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cpuid() { ! emit_int8(0x0F); ! emit_int8((unsigned char)0xA2); } // Opcode / Instruction Op / En 64 - Bit Mode Compat / Leg Mode Description Implemented // F2 0F 38 F0 / r CRC32 r32, r / m8 RM Valid Valid Accumulate CRC32 on r / m8. v // F2 REX 0F 38 F0 / r CRC32 r32, r / m8* RM Valid N.E. Accumulate CRC32 on r / m8. - --- 1653,1667 ---- void Assembler::comiss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2F, (unsigned char)(0xC0 | encode)); } void Assembler::cpuid() { ! emit_int16(0x0F, (unsigned char)0xA2); } // Opcode / Instruction Op / En 64 - Bit Mode Compat / Leg Mode Description Implemented // F2 0F 38 F0 / r CRC32 r32, r / m8 RM Valid Valid Accumulate CRC32 on r / m8. v // F2 REX 0F 38 F0 / r CRC32 r32, r / m8* RM Valid N.E. Accumulate CRC32 on r / m8. -
*** 1753,1766 **** default: assert(0, "Unsupported value for a sizeInBytes argument"); break; } LP64_ONLY(prefix(crc, v, p);) ! emit_int8((int8_t)0x0F); ! emit_int8(0x38); ! emit_int8((int8_t)(0xF0 | w)); ! emit_int8(0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7)); } void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) { assert(VM_Version::supports_sse4_2(), ""); InstructionMark im(this); --- 1704,1717 ---- default: assert(0, "Unsupported value for a sizeInBytes argument"); break; } LP64_ONLY(prefix(crc, v, p);) ! emit_int32((int8_t)0x0F, ! 0x38, ! (int8_t)(0xF0 | w), ! 0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7)); } void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) { assert(VM_Version::supports_sse4_2(), ""); InstructionMark im(this);
*** 1782,1820 **** default: assert(0, "Unsupported value for a sizeInBytes argument"); break; } LP64_ONLY(prefix(crc, adr, p);) ! emit_int8((int8_t)0x0F); ! emit_int8(0x38); ! emit_int8((int8_t)(0xF0 | w)); emit_operand(crc, adr); } void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xE6); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5B); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5A); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvtsd2ss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 1733,1766 ---- default: assert(0, "Unsupported value for a sizeInBytes argument"); break; } LP64_ONLY(prefix(crc, adr, p);) ! emit_int24(0x0F, 0x38, (unsigned char)(0xF0 | w)); emit_operand(crc, adr); } void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xE6, (unsigned char)(0xC0 | encode)); } void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5B, (unsigned char)(0xC0 | encode)); } void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5A, (unsigned char)(0xC0 | encode)); } void Assembler::cvtsd2ss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 1828,1839 **** void Assembler::cvtsi2sdl(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2A); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvtsi2sdl(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 1774,1784 ---- void Assembler::cvtsi2sdl(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2A, (unsigned char)(0xC0 | encode)); } void Assembler::cvtsi2sdl(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 1846,1857 **** void Assembler::cvtsi2ssl(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2A); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvtsi2ssl(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); --- 1791,1801 ---- void Assembler::cvtsi2ssl(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2A, (unsigned char)(0xC0 | encode)); } void Assembler::cvtsi2ssl(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this);
*** 1864,1883 **** void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2A); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5A); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvtss2sd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 1808,1825 ---- void Assembler::cvtsi2ssq(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2A, (unsigned char)(0xC0 | encode)); } void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5A, (unsigned char)(0xC0 | encode)); } void Assembler::cvtss2sd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 1891,1982 **** void Assembler::cvttsd2sil(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvttss2sil(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xE6); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pabsb(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x1C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pabsw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x1D); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pabsd(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x1E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0x1C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0x1D); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_evex() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0x1E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) { assert(UseAVX > 2, ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0x1F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::decl(Address dst) { // Don't use it directly. Use MacroAssembler::decrement() instead. InstructionMark im(this); --- 1833,1914 ---- void Assembler::cvttsd2sil(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2C, (unsigned char)(0xC0 | encode)); } void Assembler::cvttss2sil(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2C, (unsigned char)(0xC0 | encode)); } void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xE6, (unsigned char)(0xC0 | encode)); } void Assembler::pabsb(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x1C, (unsigned char)(0xC0 | encode)); } void Assembler::pabsw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x1D, (unsigned char)(0xC0 | encode)); } void Assembler::pabsd(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x1E, (unsigned char)(0xC0 | encode)); } void Assembler::vpabsb(XMMRegister dst, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x1C, (unsigned char)(0xC0 | encode)); } void Assembler::vpabsw(XMMRegister dst, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x1D, (unsigned char)(0xC0 | encode)); } void Assembler::vpabsd(XMMRegister dst, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_evex() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x1E, (unsigned char)(0xC0 | encode)); } void Assembler::evpabsq(XMMRegister dst, XMMRegister src, int vector_len) { assert(UseAVX > 2, ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x1F, (unsigned char)(0xC0 | encode)); } void Assembler::decl(Address dst) { // Don't use it directly. Use MacroAssembler::decrement() instead. InstructionMark im(this);
*** 1999,2010 **** void Assembler::divsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::divss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); --- 1931,1941 ---- void Assembler::divsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5E, (unsigned char)(0xC0 | encode)); } void Assembler::divss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this);
*** 2017,2084 **** void Assembler::divss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::emms() { NOT_LP64(assert(VM_Version::supports_mmx(), "")); ! emit_int8(0x0F); ! emit_int8(0x77); } void Assembler::hlt() { emit_int8((unsigned char)0xF4); } void Assembler::idivl(Register src) { int encode = prefix_and_encode(src->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xF8 | encode)); } void Assembler::divl(Register src) { // Unsigned int encode = prefix_and_encode(src->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xF0 | encode)); } void Assembler::imull(Register src) { int encode = prefix_and_encode(src->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xE8 | encode)); } void Assembler::imull(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::imull(Register dst, Register src, int value) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); if (is8bit(value)) { ! emit_int8(0x6B); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(value & 0xFF); } else { ! emit_int8(0x69); ! emit_int8((unsigned char)(0xC0 | encode)); emit_int32(value); } } void Assembler::imull(Register dst, Address src) { InstructionMark im(this); prefix(src, dst); ! emit_int8(0x0F); ! emit_int8((unsigned char) 0xAF); emit_operand(dst, src); } void Assembler::incl(Address dst) { --- 1948,2004 ---- void Assembler::divss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5E, (unsigned char)(0xC0 | encode)); } void Assembler::emms() { NOT_LP64(assert(VM_Version::supports_mmx(), "")); ! emit_int16(0x0F, 0x77); } void Assembler::hlt() { emit_int8((unsigned char)0xF4); } void Assembler::idivl(Register src) { int encode = prefix_and_encode(src->encoding()); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xF8 | encode)); } void Assembler::divl(Register src) { // Unsigned int encode = prefix_and_encode(src->encoding()); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xF0 | encode)); } void Assembler::imull(Register src) { int encode = prefix_and_encode(src->encoding()); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xE8 | encode)); } void Assembler::imull(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xAF, (unsigned char)(0xC0 | encode)); } void Assembler::imull(Register dst, Register src, int value) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); if (is8bit(value)) { ! emit_int24(0x6B, (unsigned char)(0xC0 | encode), value & 0xFF); } else { ! emit_int16(0x69, (unsigned char)(0xC0 | encode)); emit_int32(value); } } void Assembler::imull(Register dst, Address src) { InstructionMark im(this); prefix(src, dst); ! emit_int16(0x0F, (unsigned char) 0xAF); emit_operand(dst, src); } void Assembler::incl(Address dst) {
*** 2099,2126 **** const int short_size = 2; const int long_size = 6; intptr_t offs = (intptr_t)dst - (intptr_t)pc(); if (maybe_short && is8bit(offs - short_size)) { // 0111 tttn #8-bit disp ! emit_int8(0x70 | cc); ! emit_int8((offs - short_size) & 0xFF); } else { // 0000 1111 1000 tttn #32-bit disp assert(is_simm32(offs - long_size), "must be 32bit offset (call4)"); ! emit_int8(0x0F); ! emit_int8((unsigned char)(0x80 | cc)); emit_int32(offs - long_size); } } else { // Note: could eliminate cond. jumps to this jump if condition // is the same however, seems to be rather unlikely case. // Note: use jccb() if label to be bound is very close to get // an 8-bit displacement L.add_patch_at(code(), locator()); ! emit_int8(0x0F); ! emit_int8((unsigned char)(0x80 | cc)); emit_int32(0); } } void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) { --- 2019,2043 ---- const int short_size = 2; const int long_size = 6; intptr_t offs = (intptr_t)dst - (intptr_t)pc(); if (maybe_short && is8bit(offs - short_size)) { // 0111 tttn #8-bit disp ! emit_int16(0x70 | cc, (offs - short_size) & 0xFF); } else { // 0000 1111 1000 tttn #32-bit disp assert(is_simm32(offs - long_size), "must be 32bit offset (call4)"); ! emit_int16(0x0F, (unsigned char)(0x80 | cc)); emit_int32(offs - long_size); } } else { // Note: could eliminate cond. jumps to this jump if condition // is the same however, seems to be rather unlikely case. // Note: use jccb() if label to be bound is very close to get // an 8-bit displacement L.add_patch_at(code(), locator()); ! emit_int16(0x0F, (unsigned char)(0x80 | cc)); emit_int32(0); } } void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) {
*** 2135,2151 **** } assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line); #endif intptr_t offs = (intptr_t)entry - (intptr_t)pc(); // 0111 tttn #8-bit disp ! emit_int8(0x70 | cc); ! emit_int8((offs - short_size) & 0xFF); } else { InstructionMark im(this); L.add_patch_at(code(), locator(), file, line); ! emit_int8(0x70 | cc); ! emit_int8(0); } } void Assembler::jmp(Address adr) { InstructionMark im(this); --- 2052,2066 ---- } assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line); #endif intptr_t offs = (intptr_t)entry - (intptr_t)pc(); // 0111 tttn #8-bit disp ! emit_int16(0x70 | cc, (offs - short_size) & 0xFF); } else { InstructionMark im(this); L.add_patch_at(code(), locator(), file, line); ! emit_int16(0x70 | cc, 0); } } void Assembler::jmp(Address adr) { InstructionMark im(this);
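Both jcc() and jccb_0() above choose between the two-byte short form (0x70|cc, disp8) and the six-byte near form (0x0F, 0x80|cc, disp32) by testing whether the displacement, measured from the end of the candidate encoding, still fits in a signed byte. A stand-alone sketch of that selection follows; is8bit() and the byte layout are spelled out here purely for illustration and are not the HotSpot implementation.

// Sketch of short-vs-near conditional jump selection; offsets are taken
// relative to the end of whichever encoding is chosen.
#include <cstdint>
#include <cstdio>
#include <vector>

static bool is8bit(intptr_t x) { return -0x80 <= x && x < 0x80; }

static void emit_jcc(std::vector<uint8_t>& code, int cc, intptr_t target, intptr_t pc) {
  const int short_size = 2;   // 0x70|cc, disp8
  const int long_size  = 6;   // 0x0F, 0x80|cc, disp32
  intptr_t offs = target - pc;
  if (is8bit(offs - short_size)) {
    code.push_back(static_cast<uint8_t>(0x70 | cc));
    code.push_back(static_cast<uint8_t>((offs - short_size) & 0xFF));
  } else {
    code.push_back(0x0F);
    code.push_back(static_cast<uint8_t>(0x80 | cc));
    int32_t disp = static_cast<int32_t>(offs - long_size);
    for (int i = 0; i < 4; i++) {
      code.push_back(static_cast<uint8_t>((disp >> (8 * i)) & 0xFF));  // little-endian disp32
    }
  }
}

int main() {
  std::vector<uint8_t> code;
  emit_jcc(code, 0x4 /* equal/zero */, 0x40, 0x10);   // near target -> 74 2E
  emit_jcc(code, 0x4, 0x10000, 0x10);                 // far target  -> 0F 84 EA FF 00 00
  for (uint8_t b : code) printf("%02X ", b);
  printf("\n");
  return 0;
}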
*** 2161,2172 **** InstructionMark im(this); const int short_size = 2; const int long_size = 5; intptr_t offs = entry - pc(); if (maybe_short && is8bit(offs - short_size)) { ! emit_int8((unsigned char)0xEB); ! emit_int8((offs - short_size) & 0xFF); } else { emit_int8((unsigned char)0xE9); emit_int32(offs - long_size); } } else { --- 2076,2086 ---- InstructionMark im(this); const int short_size = 2; const int long_size = 5; intptr_t offs = entry - pc(); if (maybe_short && is8bit(offs - short_size)) { ! emit_int16((unsigned char)0xEB, (offs - short_size) & 0xFF); } else { emit_int8((unsigned char)0xE9); emit_int32(offs - long_size); } } else {
*** 2181,2192 **** } } void Assembler::jmp(Register entry) { int encode = prefix_and_encode(entry->encoding()); ! emit_int8((unsigned char)0xFF); ! emit_int8((unsigned char)(0xE0 | encode)); } void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { InstructionMark im(this); emit_int8((unsigned char)0xE9); --- 2095,2105 ---- } } void Assembler::jmp(Register entry) { int encode = prefix_and_encode(entry->encoding()); ! emit_int16((unsigned char)0xFF, (unsigned char)(0xE0 | encode)); } void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) { InstructionMark im(this); emit_int8((unsigned char)0xE9);
*** 2208,2224 **** dist += (dist < 0 ? (-delta) :delta); } assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line); #endif intptr_t offs = entry - pc(); ! emit_int8((unsigned char)0xEB); ! emit_int8((offs - short_size) & 0xFF); } else { InstructionMark im(this); L.add_patch_at(code(), locator(), file, line); ! emit_int8((unsigned char)0xEB); ! emit_int8(0); } } void Assembler::ldmxcsr( Address src) { if (UseAVX > 0 ) { --- 2121,2135 ---- dist += (dist < 0 ? (-delta) :delta); } assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line); #endif intptr_t offs = entry - pc(); ! emit_int16((unsigned char)0xEB, (offs - short_size) & 0xFF); } else { InstructionMark im(this); L.add_patch_at(code(), locator(), file, line); ! emit_int16((unsigned char)0xEB, 0); } } void Assembler::ldmxcsr( Address src) { if (UseAVX > 0 ) {
*** 2229,2240 **** emit_operand(as_Register(2), src); } else { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); prefix(src); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); emit_operand(as_Register(2), src); } } void Assembler::leal(Register dst, Address src) { --- 2140,2150 ---- emit_operand(as_Register(2), src); } else { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); prefix(src); ! emit_int16(0x0F, (unsigned char)0xAE); emit_operand(as_Register(2), src); } } void Assembler::leal(Register dst, Address src) {
*** 2246,2287 **** emit_int8((unsigned char)0x8D); emit_operand(dst, src); } void Assembler::lfence() { ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); ! emit_int8((unsigned char)0xE8); } void Assembler::lock() { emit_int8((unsigned char)0xF0); } void Assembler::lzcntl(Register dst, Register src) { assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); emit_int8((unsigned char)0xF3); int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBD); ! emit_int8((unsigned char)(0xC0 | encode)); } // Emit mfence instruction void Assembler::mfence() { NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); ! emit_int8((unsigned char)0xF0); } // Emit sfence instruction void Assembler::sfence() { NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); ! emit_int8((unsigned char)0xF8); } void Assembler::mov(Register dst, Register src) { LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); } --- 2156,2189 ---- emit_int8((unsigned char)0x8D); emit_operand(dst, src); } void Assembler::lfence() { ! emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xE8); } void Assembler::lock() { emit_int8((unsigned char)0xF0); } void Assembler::lzcntl(Register dst, Register src) { assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); emit_int8((unsigned char)0xF3); int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBD, (unsigned char)(0xC0 | encode)); } // Emit mfence instruction void Assembler::mfence() { NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) ! emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF0); } // Emit sfence instruction void Assembler::sfence() { NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");) ! emit_int24(0x0F, (unsigned char)0xAE, (unsigned char)0xF8); } void Assembler::mov(Register dst, Register src) { LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); }
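lfence, mfence and sfence above share the 0x0F 0xAE escape; the third byte is a register-form ModRM whose reg field carries the opcode extension (/5, /6, /7), which is where the constants 0xE8, 0xF0 and 0xF8 come from. A quick stand-alone check of that relationship (illustrative only):

// ModRM with mod=11 (register form), reg = opcode extension, rm = 0.
#include <cstdio>

static int modrm_ext(int ext) { return 0xC0 | (ext << 3); }

int main() {
  printf("lfence: 0F AE %02X\n", modrm_ext(5));  // 0F AE E8
  printf("mfence: 0F AE %02X\n", modrm_ext(6));  // 0F AE F0
  printf("sfence: 0F AE %02X\n", modrm_ext(7));  // 0F AE F8
  return 0;
}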
*** 2290,2318 **** NOT_LP64(assert(VM_Version::supports_sse2(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x28); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movaps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x28); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movlhps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x16); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movb(Register dst, Address src) { NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); InstructionMark im(this); --- 2192,2217 ---- NOT_LP64(assert(VM_Version::supports_sse2(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x28, (unsigned char)(0xC0 | encode)); } void Assembler::movaps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x28, (unsigned char)(0xC0 | encode)); } void Assembler::movlhps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x16, (unsigned char)(0xC0 | encode)); } void Assembler::movb(Register dst, Address src) { NOT_LP64(assert(dst->has_byte_register(), "must have byte register")); InstructionMark im(this);
*** 2325,2368 **** NOT_LP64(assert(VM_Version::supports_sse3(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x12); ! emit_int8(0xC0 | encode); } void Assembler::kmovbl(KRegister dst, Register src) { assert(VM_Version::supports_avx512dq(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x92); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::kmovbl(Register dst, KRegister src) { assert(VM_Version::supports_avx512dq(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x93); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::kmovwl(KRegister dst, Register src) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x92); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::kmovwl(Register dst, KRegister src) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x93); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::kmovwl(KRegister dst, Address src) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this); --- 2224,2262 ---- NOT_LP64(assert(VM_Version::supports_sse3(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x12, 0xC0 | encode); } void Assembler::kmovbl(KRegister dst, Register src) { assert(VM_Version::supports_avx512dq(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x92, (unsigned char)(0xC0 | encode)); } void Assembler::kmovbl(Register dst, KRegister src) { assert(VM_Version::supports_avx512dq(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int16((unsigned char)0x93, (unsigned char)(0xC0 | encode)); } void Assembler::kmovwl(KRegister dst, Register src) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x92, (unsigned char)(0xC0 | encode)); } void Assembler::kmovwl(Register dst, KRegister src) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x93, (unsigned char)(0xC0 | encode)); } void Assembler::kmovwl(KRegister dst, Address src) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this);
*** 2374,2401 **** void Assembler::kmovdl(KRegister dst, Register src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x92); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::kmovdl(Register dst, KRegister src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x93); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::kmovql(KRegister dst, KRegister src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x90); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::kmovql(KRegister dst, Address src) { assert(VM_Version::supports_avx512bw(), ""); InstructionMark im(this); --- 2268,2292 ---- void Assembler::kmovdl(KRegister dst, Register src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x92, (unsigned char)(0xC0 | encode)); } void Assembler::kmovdl(Register dst, KRegister src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x93, (unsigned char)(0xC0 | encode)); } void Assembler::kmovql(KRegister dst, KRegister src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x90, (unsigned char)(0xC0 | encode)); } void Assembler::kmovql(KRegister dst, Address src) { assert(VM_Version::supports_avx512bw(), ""); InstructionMark im(this);
*** 2416,2504 **** void Assembler::kmovql(KRegister dst, Register src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x92); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::kmovql(Register dst, KRegister src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x93); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::knotwl(KRegister dst, KRegister src) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x44); ! emit_int8((unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::kortestbl(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512dq(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x98); ! emit_int8((unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::kortestwl(KRegister src1, KRegister src2) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x98); ! emit_int8((unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::kortestdl(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x98); ! emit_int8((unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::kortestql(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x98); ! 
emit_int8((unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::ktestql(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x99); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::ktestq(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x99); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::ktestd(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x99); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movb(Address dst, int imm8) { InstructionMark im(this); prefix(dst); --- 2307,2385 ---- void Assembler::kmovql(KRegister dst, Register src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x92, (unsigned char)(0xC0 | encode)); } void Assembler::kmovql(Register dst, KRegister src) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x93, (unsigned char)(0xC0 | encode)); } void Assembler::knotwl(KRegister dst, KRegister src) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x44, (unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::kortestbl(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512dq(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int16((unsigned char)0x98, (unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::kortestwl(KRegister src1, KRegister src2) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x98, (unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::kortestdl(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x98, (unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::kortestql(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x98, (unsigned char)(0xC0 | encode)); } // This instruction produces ZF or CF flags void Assembler::ktestql(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x99, (unsigned char)(0xC0 | encode)); } void Assembler::ktestq(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x99, (unsigned char)(0xC0 | encode)); } void Assembler::ktestd(KRegister src1, KRegister src2) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0x99, (unsigned char)(0xC0 | encode)); } void Assembler::movb(Address dst, int imm8) { InstructionMark im(this); prefix(dst);
*** 2518,2538 **** void Assembler::movdl(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x6E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movdl(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); // swap src/dst to get correct prefix int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x7E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movdl(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 2399,2417 ---- void Assembler::movdl(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x6E, (unsigned char)(0xC0 | encode)); } void Assembler::movdl(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); // swap src/dst to get correct prefix int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x7E, (unsigned char)(0xC0 | encode)); } void Assembler::movdl(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 2555,2566 **** void Assembler::movdqa(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x6F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movdqa(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 2434,2444 ---- void Assembler::movdqa(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x6F, (unsigned char)(0xC0 | encode)); } void Assembler::movdqa(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 2583,2594 **** void Assembler::movdqu(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x6F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movdqu(Address dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 2461,2471 ---- void Assembler::movdqu(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x6F, (unsigned char)(0xC0 | encode)); } void Assembler::movdqu(Address dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 2603,2614 **** // Move Unaligned 256bit Vector void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) { assert(UseAVX > 0, ""); InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x6F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vmovdqu(XMMRegister dst, Address src) { assert(UseAVX > 0, ""); InstructionMark im(this); --- 2480,2490 ---- // Move Unaligned 256bit Vector void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) { assert(UseAVX > 0, ""); InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x6F, (unsigned char)(0xC0 | encode)); } void Assembler::vmovdqu(XMMRegister dst, Address src) { assert(UseAVX > 0, ""); InstructionMark im(this);
*** 2637,2648 **** assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); ! emit_int8(0x6F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this); --- 2513,2523 ---- assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3; int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes); ! emit_int16(0x6F, (unsigned char)(0xC0 | encode)); } void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this);
*** 2734,2745 **** void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x6F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this); --- 2609,2619 ---- void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x6F, (unsigned char)(0xC0 | encode)); } void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this);
*** 2767,2778 **** void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x6F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this); --- 2641,2651 ---- void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x6F, (unsigned char)(0xC0 | encode)); } void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this);
*** 2805,2816 **** emit_int32(imm32); } void Assembler::movl(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8((unsigned char)0x8B); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movl(Register dst, Address src) { InstructionMark im(this); prefix(src, dst); --- 2678,2688 ---- emit_int32(imm32); } void Assembler::movl(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int16((unsigned char)0x8B, (unsigned char)(0xC0 | encode)); } void Assembler::movl(Register dst, Address src) { InstructionMark im(this); prefix(src, dst);
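movl(Register, Register) above is the plain 0x8B /r form: prefix_and_encode() has already folded the two 3-bit register numbers into the reg and rm fields, and OR-ing with 0xC0 sets mod=11 to select the register-register variant; the same (0xC0 | encode) pattern recurs throughout the file. A stand-alone sketch of that byte, with illustrative register numbers:

// Register-register ModRM: mod=11, reg=dst, rm=src (low 3 bits of each register number).
#include <cstdio>

static int modrm_rr(int dst, int src) { return 0xC0 | (dst << 3) | src; }

int main() {
  const int rax = 0, rcx = 1, rdx = 2;
  printf("mov edx, ecx -> 8B %02X\n", modrm_rr(rdx, rcx));  // 8B D1
  printf("mov eax, edx -> 8B %02X\n", modrm_rr(rax, rdx));  // 8B C2
  return 0;
}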
*** 2847,2865 **** emit_operand(dst, src); } void Assembler::movq( MMXRegister dst, Address src ) { assert( VM_Version::supports_mmx(), "" ); ! emit_int8(0x0F); ! emit_int8(0x6F); emit_operand(dst, src); } void Assembler::movq( Address dst, MMXRegister src ) { assert( VM_Version::supports_mmx(), "" ); ! emit_int8(0x0F); ! emit_int8(0x7F); // workaround gcc (3.2.1-7a) bug // In that version of gcc with only an emit_operand(MMX, Address) // gcc will tail jump and try and reverse the parameters completely // obliterating dst in the process. By having a version available // that doesn't need to swap the args at the tail jump the bug is --- 2719,2735 ---- emit_operand(dst, src); } void Assembler::movq( MMXRegister dst, Address src ) { assert( VM_Version::supports_mmx(), "" ); ! emit_int16(0x0F, 0x6F); emit_operand(dst, src); } void Assembler::movq( Address dst, MMXRegister src ) { assert( VM_Version::supports_mmx(), "" ); ! emit_int16(0x0F, 0x7F); // workaround gcc (3.2.1-7a) bug // In that version of gcc with only an emit_operand(MMX, Address) // gcc will tail jump and try and reverse the parameters completely // obliterating dst in the process. By having a version available // that doesn't need to swap the args at the tail jump the bug is
*** 2890,2919 **** } void Assembler::movsbl(Register dst, Address src) { // movsxb InstructionMark im(this); prefix(src, dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBE); emit_operand(dst, src); } void Assembler::movsbl(Register dst, Register src) { // movsxb NOT_LP64(assert(src->has_byte_register(), "must have byte register")); int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBE); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x10); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 2760,2785 ---- } void Assembler::movsbl(Register dst, Address src) { // movsxb InstructionMark im(this); prefix(src, dst); ! emit_int16(0x0F, (unsigned char)0xBE); emit_operand(dst, src); } void Assembler::movsbl(Register dst, Register src) { // movsxb NOT_LP64(assert(src->has_byte_register(), "must have byte register")); int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); ! emit_int24(0x0F, (unsigned char)0xBE, (unsigned char)(0xC0 | encode)); } void Assembler::movsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x10, (unsigned char)(0xC0 | encode)); } void Assembler::movsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 2939,2950 **** void Assembler::movss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x10); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); --- 2805,2815 ---- void Assembler::movss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x10, (unsigned char)(0xC0 | encode)); } void Assembler::movss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this);
*** 2967,2986 **** } void Assembler::movswl(Register dst, Address src) { // movsxw InstructionMark im(this); prefix(src, dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBF); emit_operand(dst, src); } void Assembler::movswl(Register dst, Register src) { // movsxw int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movw(Address dst, int imm16) { InstructionMark im(this); --- 2832,2848 ---- } void Assembler::movswl(Register dst, Address src) { // movsxw InstructionMark im(this); prefix(src, dst); ! emit_int16(0x0F, (unsigned char)0xBF); emit_operand(dst, src); } void Assembler::movswl(Register dst, Register src) { // movsxw int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBF, (unsigned char)(0xC0 | encode)); } void Assembler::movw(Address dst, int imm16) { InstructionMark im(this);
*** 3008,3043 **** } void Assembler::movzbl(Register dst, Address src) { // movzxb InstructionMark im(this); prefix(src, dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB6); emit_operand(dst, src); } void Assembler::movzbl(Register dst, Register src) { // movzxb NOT_LP64(assert(src->has_byte_register(), "must have byte register")); int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB6); ! emit_int8(0xC0 | encode); } void Assembler::movzwl(Register dst, Address src) { // movzxw InstructionMark im(this); prefix(src, dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB7); emit_operand(dst, src); } void Assembler::movzwl(Register dst, Register src) { // movzxw int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB7); ! emit_int8(0xC0 | encode); } void Assembler::mull(Address src) { InstructionMark im(this); prefix(src); --- 2870,2899 ---- } void Assembler::movzbl(Register dst, Address src) { // movzxb InstructionMark im(this); prefix(src, dst); ! emit_int16(0x0F, (unsigned char)0xB6); emit_operand(dst, src); } void Assembler::movzbl(Register dst, Register src) { // movzxb NOT_LP64(assert(src->has_byte_register(), "must have byte register")); int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true); ! emit_int24(0x0F, (unsigned char)0xB6, 0xC0 | encode); } void Assembler::movzwl(Register dst, Address src) { // movzxw InstructionMark im(this); prefix(src, dst); ! emit_int16(0x0F, (unsigned char)0xB7); emit_operand(dst, src); } void Assembler::movzwl(Register dst, Register src) { // movzxw int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xB7, 0xC0 | encode); } void Assembler::mull(Address src) { InstructionMark im(this); prefix(src);
*** 3045,3056 **** emit_operand(rsp, src); } void Assembler::mull(Register src) { int encode = prefix_and_encode(src->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xE0 | encode)); } void Assembler::mulsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 2901,2911 ---- emit_operand(rsp, src); } void Assembler::mull(Register src) { int encode = prefix_and_encode(src->encoding()); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xE0 | encode)); } void Assembler::mulsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 3065,3076 **** void Assembler::mulsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x59); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::mulss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); --- 2920,2930 ---- void Assembler::mulsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x59, (unsigned char)(0xC0 | encode)); } void Assembler::mulss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this);
*** 3083,3100 **** void Assembler::mulss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x59); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::negl(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xD8 | encode)); } void Assembler::nop(int i) { #ifdef ASSERT assert(i > 0, " "); --- 2937,2952 ---- void Assembler::mulss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x59, (unsigned char)(0xC0 | encode)); } void Assembler::negl(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xD8 | encode)); } void Assembler::nop(int i) { #ifdef ASSERT assert(i > 0, " ");
*** 3131,3162 **** // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 while(i >= 15) { // For Intel don't generate consecutive address nops (mix with regular nops) i -= 15; ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix addr_nop_8(); ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8((unsigned char)0x90); ! // nop } switch (i) { case 14: emit_int8(0x66); // size prefix case 13: emit_int8(0x66); // size prefix case 12: addr_nop_8(); ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8((unsigned char)0x90); ! // nop break; case 11: emit_int8(0x66); // size prefix case 10: emit_int8(0x66); // size prefix --- 2983,3004 ---- // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 while(i >= 15) { // For Intel don't generate consecutive address nops (mix with regular nops) i -= 15; ! emit_int24(0x66, 0x66, 0x66); addr_nop_8(); ! emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); } switch (i) { case 14: emit_int8(0x66); // size prefix case 13: emit_int8(0x66); // size prefix case 12: addr_nop_8(); ! emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); break; case 11: emit_int8(0x66); // size prefix case 10: emit_int8(0x66); // size prefix
*** 3214,3226 **** // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 // Size prefixes (0x66) are added for larger sizes while(i >= 22) { i -= 11; ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix addr_nop_8(); } // Generate first nop for size between 21-12 switch (i) { case 21: --- 3056,3066 ---- // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 // Size prefixes (0x66) are added for larger sizes while(i >= 22) { i -= 11; ! emit_int24(0x66, 0x66, 0x66); addr_nop_8(); } // Generate first nop for size between 21-12 switch (i) { case 21:
*** 3313,3344 **** // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 while (i >= 15) { // For ZX don't generate consecutive address nops (mix with regular nops) i -= 15; ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix addr_nop_8(); ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8((unsigned char)0x90); ! // nop } switch (i) { case 14: emit_int8(0x66); // size prefix case 13: emit_int8(0x66); // size prefix case 12: addr_nop_8(); ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8(0x66); // size prefix ! emit_int8((unsigned char)0x90); ! // nop break; case 11: emit_int8(0x66); // size prefix case 10: emit_int8(0x66); // size prefix --- 3153,3174 ---- // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90 while (i >= 15) { // For ZX don't generate consecutive address nops (mix with regular nops) i -= 15; ! emit_int24(0x66, 0x66, 0x66); addr_nop_8(); ! emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); } switch (i) { case 14: emit_int8(0x66); // size prefix case 13: emit_int8(0x66); // size prefix case 12: addr_nop_8(); ! emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); break; case 11: emit_int8(0x66); // size prefix case 10: emit_int8(0x66); // size prefix
*** 3384,3422 **** // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 // ! while(i > 12) { i -= 4; ! emit_int8(0x66); // size prefix ! emit_int8(0x66); ! emit_int8(0x66); ! emit_int8((unsigned char)0x90); ! // nop } // 1 - 12 nops ! if(i > 8) { ! if(i > 9) { i -= 1; emit_int8(0x66); } i -= 3; ! emit_int8(0x66); ! emit_int8(0x66); ! emit_int8((unsigned char)0x90); } // 1 - 8 nops ! if(i > 4) { ! if(i > 6) { i -= 1; emit_int8(0x66); } i -= 3; ! emit_int8(0x66); ! emit_int8(0x66); ! emit_int8((unsigned char)0x90); } switch (i) { case 4: emit_int8(0x66); case 3: --- 3214,3244 ---- // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90 // ! while (i > 12) { i -= 4; ! emit_int32(0x66, 0x66, 0x66, (unsigned char)0x90); } // 1 - 12 nops ! if (i > 8) { ! if (i > 9) { i -= 1; emit_int8(0x66); } i -= 3; ! emit_int24(0x66, 0x66, (unsigned char)0x90); } // 1 - 8 nops ! if (i > 4) { ! if (i > 6) { i -= 1; emit_int8(0x66); } i -= 3; ! emit_int24(0x66, 0x66, (unsigned char)0x90); } switch (i) { case 4: emit_int8(0x66); case 3:
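The nop(i) code above pads arbitrary lengths by combining 0x66 operand-size prefixes with the canonical 1-, 4- and 8-byte nops spelled out in the comments, with per-vendor preferences about how the pieces are mixed. The stand-alone sketch below shows one simple padding strategy in the same spirit; it is not the CPU-specific policy used above, just an illustration of covering any length from those building blocks.

// Pad 'count' bytes from multi-byte 0F 1F nops plus single-byte 0x90 nops.
#include <cstdint>
#include <cstdio>
#include <vector>

static void pad_nops(std::vector<uint8_t>& code, int count) {
  static const uint8_t nop8[] = {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00};  // 8-byte nop
  static const uint8_t nop4[] = {0x0F, 0x1F, 0x40, 0x00};                          // 4-byte nop
  while (count >= 8) { code.insert(code.end(), nop8, nop8 + 8); count -= 8; }
  while (count >= 4) { code.insert(code.end(), nop4, nop4 + 4); count -= 4; }
  while (count-- > 0) code.push_back(0x90);                                        // 1-byte nop
}

int main() {
  std::vector<uint8_t> code;
  pad_nops(code, 13);                           // one 8-byte, one 4-byte, one 1-byte nop
  printf("emitted %zu bytes\n", code.size());   // prints: emitted 13 bytes
  return 0;
}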
*** 3431,3442 **** } } void Assembler::notl(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xD0 | encode)); } void Assembler::orl(Address dst, int32_t imm32) { InstructionMark im(this); prefix(dst); --- 3253,3263 ---- } } void Assembler::notl(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xD0 | encode)); } void Assembler::orl(Address dst, int32_t imm32) { InstructionMark im(this); prefix(dst);
*** 3488,3563 **** void Assembler::packuswb(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x67); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "some form of AVX must be enabled"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x67); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x00); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 2, "requires AVX512F"); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0x36); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x46); ! emit_int8(0xC0 | encode); ! emit_int8(imm8); } void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x06); ! emit_int8(0xC0 | encode); ! emit_int8(imm8); } void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x76); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pause() { ! emit_int8((unsigned char)0xF3); ! emit_int8((unsigned char)0x90); } void Assembler::ud2() { ! emit_int8(0x0F); ! 
emit_int8(0x0B); } void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) { assert(VM_Version::supports_sse4_2(), ""); InstructionMark im(this); --- 3309,3372 ---- void Assembler::packuswb(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x67, (unsigned char)(0xC0 | encode)); } void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "some form of AVX must be enabled"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x67, (unsigned char)(0xC0 | encode)); } void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x00, (unsigned char)(0xC0 | encode), imm8); } void Assembler::vpermq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 2, "requires AVX512F"); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0x36, (unsigned char)(0xC0 | encode)); } void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x46, 0xC0 | encode, imm8); } void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x06, 0xC0 | encode, imm8); } void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x76, (unsigned char)(0xC0 | encode)); } void Assembler::pause() { ! emit_int16((unsigned char)0xF3, (unsigned char)0x90); } void Assembler::ud2() { ! 
emit_int16(0x0F, 0x0B); } void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) { assert(VM_Version::supports_sse4_2(), ""); InstructionMark im(this);
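Note on the pattern in the hunks above: runs of back-to-back emit_int8 calls are collapsed into a single emit_int16/emit_int24 call that takes the opcode, the ModRM byte and, where present, the immediate. The real emitters are declared elsewhere and are not part of this webrev; the following is only a minimal sketch of the idea, using a hypothetical SketchBuffer type rather than the HotSpot code buffer, assuming a little-endian host.

  #include <cstdint>
  #include <cstring>
  #include <vector>

  // Hypothetical stand-in for the assembler's code buffer (illustration only).
  struct SketchBuffer {
    std::vector<uint8_t> code;
    void emit_int8(uint8_t b) { code.push_back(b); }
    // Two bytes in one call; packing them allows a single 2-byte store
    // (on a little-endian host b0 lands first in memory, i.e. program order).
    void emit_int16(uint8_t b0, uint8_t b1) {
      uint16_t packed = (uint16_t)(b0 | (b1 << 8));
      std::size_t pos = code.size();
      code.resize(pos + 2);
      std::memcpy(&code[pos], &packed, sizeof(packed));
    }
    // Opcode, ModRM and trailing immediate emitted together.
    void emit_int24(uint8_t b0, uint8_t b1, uint8_t b2) {
      emit_int16(b0, b1);
      emit_int8(b2);
    }
  };

The presumable gain is fewer individual buffer updates per instruction; the exact mechanism in HotSpot is defined by the emitters themselves, which this hunk does not show.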
*** 3570,3610 **** void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x61); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x74); ! emit_int8((unsigned char)(0xC0 | encode)); } // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x74); ! emit_int8((unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x74); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); InstructionMark im(this); --- 3379,3414 ---- void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x61, (unsigned char)(0xC0 | encode), imm8); } // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int16(0x74, (unsigned char)(0xC0 | encode)); } // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x74, (unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x74, (unsigned char)(0xC0 | encode)); } void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); InstructionMark im(this);
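Almost every changed line above pairs the opcode with (unsigned char)(0xC0 | encode). That second byte is the register-to-register ModRM byte: mod = 11b selects the register form, and the prefix/encode helpers have already folded the two 3-bit register numbers into encode. A small sketch of the byte layout, with hypothetical helper names:

  #include <cassert>
  #include <cstdint>

  // ModRM byte layout: [7:6] mod, [5:3] reg, [2:0] rm.
  // For a register-register operand mod is 0b11, hence the 0xC0.
  static uint8_t modrm_reg_form(uint8_t reg_field, uint8_t rm_field) {
    assert(reg_field < 8 && rm_field < 8);        // low 3 bits of each register number
    uint8_t encode = (uint8_t)((reg_field << 3) | rm_field);
    return (uint8_t)(0xC0 | encode);              // same shape as the emit_int16/24 calls above
  }
  // Example: reg = 1 (xmm1), rm = 2 (xmm2) gives 0xC0 | 0x0A = 0xCA.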
*** 3634,3658 **** void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x3E); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(vcc); } void Assembler::evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.reset_is_clear_context(); attributes.set_embedded_opmask_register_specifier(mask); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x3E); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(vcc); } void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); InstructionMark im(this); --- 3438,3458 ---- void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x3E, (unsigned char)(0xC0 | encode), vcc); } void Assembler::evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); attributes.reset_is_clear_context(); attributes.set_embedded_opmask_register_specifier(mask); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x3E, (unsigned char)(0xC0 | encode), vcc); } void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); InstructionMark im(this);
*** 3694,3724 **** // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x75); ! emit_int8((unsigned char)(0xC0 | encode)); } // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x75); ! emit_int8((unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x75); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionMark im(this); --- 3494,3521 ---- // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x75, (unsigned char)(0xC0 | encode)); } // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x75, (unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int16(0x75, (unsigned char)(0xC0 | encode)); } void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionMark im(this);
*** 3743,3765 **** // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x76); ! emit_int8((unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); attributes.reset_is_clear_context(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x76); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this); --- 3540,3560 ---- // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x76, (unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); attributes.reset_is_clear_context(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x76, (unsigned char)(0xC0 | encode)); } void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionMark im(this);
*** 3776,3807 **** // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x29); ! emit_int8((unsigned char)(0xC0 | encode)); } // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x29); ! emit_int8((unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.reset_is_clear_context(); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x29); ! emit_int8((unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); --- 3571,3599 ---- // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x29, (unsigned char)(0xC0 | encode)); } // In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! 
emit_int16(0x29, (unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.reset_is_clear_context(); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x29, (unsigned char)(0xC0 | encode)); } // In this context, kdst is written the mask used to process the equal components void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_evex(), "");
*** 3818,3846 **** void Assembler::pmovmskb(Register dst, XMMRegister src) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD7); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpmovmskb(Register dst, XMMRegister src) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD7); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pextrd(Register dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x16); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::pextrd(Address dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); --- 3610,3634 ---- void Assembler::pmovmskb(Register dst, XMMRegister src) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD7, (unsigned char)(0xC0 | encode)); } void Assembler::vpmovmskb(Register dst, XMMRegister src) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD7, (unsigned char)(0xC0 | encode)); } void Assembler::pextrd(Register dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x16, (unsigned char)(0xC0 | encode), imm8); } void Assembler::pextrd(Address dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
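pmovmskb/vpmovmskb (opcode 0xD7 in the hunk above) gather the most significant bit of every byte of the vector source into a general-purpose register. A scalar model of the semantics, for illustration only:

  #include <cstddef>
  #include <cstdint>

  // Bit i of the result is the sign bit of byte i of the source.
  // The 128-bit form fills 16 result bits, VPMOVMSKB on a 256-bit source
  // fills 32; the remaining destination bits are zero.
  static uint32_t pmovmskb_model(const uint8_t src[], std::size_t nbytes /* 16 or 32 */) {
    uint32_t mask = 0;
    for (std::size_t i = 0; i < nbytes; i++) {
      mask |= (uint32_t)(src[i] >> 7) << i;
    }
    return mask;
  }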
*** 3853,3865 **** void Assembler::pextrq(Register dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x16); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::pextrq(Address dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); --- 3641,3651 ---- void Assembler::pextrq(Register dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x16, (unsigned char)(0xC0 | encode), imm8); } void Assembler::pextrq(Address dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
*** 3872,3884 **** void Assembler::pextrw(Register dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xC5); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::pextrw(Address dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); --- 3658,3668 ---- void Assembler::pextrw(Register dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24((unsigned char)0xC5, (unsigned char)(0xC0 | encode), imm8); } void Assembler::pextrw(Address dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
*** 3901,3913 **** void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x22); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); --- 3685,3695 ---- void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x22, (unsigned char)(0xC0 | encode), imm8); } void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
*** 3920,3932 **** void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x22); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); --- 3702,3712 ---- void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x22, (unsigned char)(0xC0 | encode), imm8); } void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
*** 3939,3951 **** void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xC4); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); --- 3719,3729 ---- void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24((unsigned char)0xC4, (unsigned char)(0xC0 | encode), imm8); } void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) { assert(VM_Version::supports_sse2(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
*** 3978,3997 **** void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x30); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x20); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 3756,3773 ---- void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x30, (unsigned char)(0xC0 | encode)); } void Assembler::pmovsxbw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x20, (unsigned char)(0xC0 | encode)); } void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
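The two conversions in the hunk above differ only in how the upper half of each destination word is filled: pmovzxbw (0x30) zero-extends the low 8 bytes of the source to 16-bit lanes, while pmovsxbw (0x20) sign-extends them. A one-lane scalar model:

  #include <cstdint>

  // One destination word per source byte.
  static uint16_t zx_lane(uint8_t b) { return (uint16_t)b; }                   // pmovzxbw: high byte = 0
  static uint16_t sx_lane(uint8_t b) { return (uint16_t)(int16_t)(int8_t)b; }  // pmovsxbw: high byte = sign
  // Example: b = 0x80 gives zx_lane = 0x0080 and sx_lane = 0xFF80.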
*** 4007,4028 **** assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x30); ! emit_int8((unsigned char) (0xC0 | encode)); } void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x20); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); assert(dst != xnoreg, "sanity"); --- 3783,3802 ---- assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x30, (unsigned char) (0xC0 | encode)); } void Assembler::vpmovsxbw(XMMRegister dst, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x20, (unsigned char)(0xC0 | encode)); } void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) { assert(VM_Version::supports_avx512vlbw(), ""); assert(dst != xnoreg, "sanity");
*** 4077,4116 **** assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_evex() : 0, " "); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x33); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pmaddwd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF5); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : (vector_len == AVX_256bit ? VM_Version::supports_avx2() : (vector_len == AVX_512bit ? VM_Version::supports_evex() : 0)), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF5); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_avx512_vnni(), "must support vnni"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x52); ! emit_int8((unsigned char)(0xC0 | encode)); } // generic void Assembler::pop(Register dst) { int encode = prefix_and_encode(dst->encoding()); --- 3851,3886 ---- assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_evex() : 0, " "); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x33, (unsigned char)(0xC0 | encode)); } void Assembler::pmaddwd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF5, (unsigned char)(0xC0 | encode)); } void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : (vector_len == AVX_256bit ? VM_Version::supports_avx2() : (vector_len == AVX_512bit ? 
VM_Version::supports_evex() : 0)), ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF5, (unsigned char)(0xC0 | encode)); } void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); assert(VM_Version::supports_avx512_vnni(), "must support vnni"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x52, (unsigned char)(0xC0 | encode)); } // generic void Assembler::pop(Register dst) { int encode = prefix_and_encode(dst->encoding());
*** 4120,4150 **** void Assembler::popcntl(Register dst, Address src) { assert(VM_Version::supports_popcnt(), "must support"); InstructionMark im(this); emit_int8((unsigned char)0xF3); prefix(src, dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB8); emit_operand(dst, src); } void Assembler::popcntl(Register dst, Register src) { assert(VM_Version::supports_popcnt(), "must support"); emit_int8((unsigned char)0xF3); int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB8); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x55); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::popf() { emit_int8((unsigned char)0x9D); } --- 3890,3916 ---- void Assembler::popcntl(Register dst, Address src) { assert(VM_Version::supports_popcnt(), "must support"); InstructionMark im(this); emit_int8((unsigned char)0xF3); prefix(src, dst); ! emit_int16(0x0F, (unsigned char)0xB8); emit_operand(dst, src); } void Assembler::popcntl(Register dst, Register src) { assert(VM_Version::supports_popcnt(), "must support"); emit_int8((unsigned char)0xF3); int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xB8, (unsigned char)(0xC0 | encode)); } void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512_vpopcntdq(), "must support vpopcntdq feature"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x55, (unsigned char)(0xC0 | encode)); } void Assembler::popf() { emit_int8((unsigned char)0x9D); }
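In the popcntl hunk above the mandatory 0xF3 prefix stays as its own emit, while the 0x0F 0xB8 escape/opcode bytes (plus, for the register form, the ModRM byte) are folded into one call. As a hand-worked example of the byte sequence this corresponds to (worked out from the encoding rules, not produced by running the assembler):

  #include <cstdint>

  // popcnt eax, ecx  ->  F3 0F B8 C1
  //   F3     mandatory prefix (emit_int8 above)
  //   0F B8  two-byte opcode  (first two arguments of emit_int24)
  //   C1     ModRM: mod = 11, reg = eax (0), rm = ecx (1) -> 0xC0 | (0 << 3 | 1)
  static const uint8_t popcnt_eax_ecx[] = { 0xF3, 0x0F, 0xB8, 0xC1 };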
*** 4157,4216 **** emit_int8((unsigned char)0x8F); emit_operand(rax, dst); } #endif - void Assembler::prefetch_prefix(Address src) { - prefix(src); - emit_int8(0x0F); - } - void Assembler::prefetchnta(Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "must support")); InstructionMark im(this); ! prefetch_prefix(src); ! emit_int8(0x18); emit_operand(rax, src); // 0, src } void Assembler::prefetchr(Address src) { assert(VM_Version::supports_3dnow_prefetch(), "must support"); InstructionMark im(this); ! prefetch_prefix(src); ! emit_int8(0x0D); emit_operand(rax, src); // 0, src } void Assembler::prefetcht0(Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "must support")); InstructionMark im(this); ! prefetch_prefix(src); ! emit_int8(0x18); emit_operand(rcx, src); // 1, src } void Assembler::prefetcht1(Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "must support")); InstructionMark im(this); ! prefetch_prefix(src); ! emit_int8(0x18); emit_operand(rdx, src); // 2, src } void Assembler::prefetcht2(Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "must support")); InstructionMark im(this); ! prefetch_prefix(src); ! emit_int8(0x18); emit_operand(rbx, src); // 3, src } void Assembler::prefetchw(Address src) { assert(VM_Version::supports_3dnow_prefetch(), "must support"); InstructionMark im(this); ! prefetch_prefix(src); ! emit_int8(0x0D); emit_operand(rcx, src); // 1, src } void Assembler::prefix(Prefix p) { emit_int8(p); --- 3923,3977 ---- emit_int8((unsigned char)0x8F); emit_operand(rax, dst); } #endif void Assembler::prefetchnta(Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "must support")); InstructionMark im(this); ! prefix(src); ! emit_int16(0x0F, 0x18); emit_operand(rax, src); // 0, src } void Assembler::prefetchr(Address src) { assert(VM_Version::supports_3dnow_prefetch(), "must support"); InstructionMark im(this); ! prefix(src); ! emit_int16(0x0F, 0x0D); emit_operand(rax, src); // 0, src } void Assembler::prefetcht0(Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "must support")); InstructionMark im(this); ! prefix(src); ! emit_int16(0x0F, 0x18); emit_operand(rcx, src); // 1, src } void Assembler::prefetcht1(Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "must support")); InstructionMark im(this); ! prefix(src); ! emit_int16(0x0F, 0x18); emit_operand(rdx, src); // 2, src } void Assembler::prefetcht2(Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "must support")); InstructionMark im(this); ! prefix(src); ! emit_int16(0x0F, 0x18); emit_operand(rbx, src); // 3, src } void Assembler::prefetchw(Address src) { assert(VM_Version::supports_3dnow_prefetch(), "must support"); InstructionMark im(this); ! prefix(src); ! emit_int16(0x0F, 0x0D); emit_operand(rcx, src); // 1, src } void Assembler::prefix(Prefix p) { emit_int8(p);
*** 4218,4239 **** void Assembler::pshufb(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x00); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x00); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pshufb(XMMRegister dst, Address src) { assert(VM_Version::supports_ssse3(), ""); InstructionMark im(this); --- 3979,3998 ---- void Assembler::pshufb(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x00, (unsigned char)(0xC0 | encode)); } void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x00, (unsigned char)(0xC0 | encode)); } void Assembler::pshufb(XMMRegister dst, Address src) { assert(VM_Version::supports_ssse3(), ""); InstructionMark im(this);
*** 4248,4272 **** assert(isByte(mode), "invalid value"); NOT_LP64(assert(VM_Version::supports_sse2(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x70); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(mode & 0xFF); } void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : (vector_len == AVX_256bit? VM_Version::supports_avx2() : (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), ""); NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x70); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(mode & 0xFF); } void Assembler::pshufd(XMMRegister dst, Address src, int mode) { assert(isByte(mode), "invalid value"); NOT_LP64(assert(VM_Version::supports_sse2(), "")); --- 4007,4027 ---- assert(isByte(mode), "invalid value"); NOT_LP64(assert(VM_Version::supports_sse2(), "")); int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit; InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x70, (unsigned char)(0xC0 | encode), mode & 0xFF); } void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : (vector_len == AVX_256bit? VM_Version::supports_avx2() : (vector_len == AVX_512bit? VM_Version::supports_evex() : 0)), ""); NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x70, (unsigned char)(0xC0 | encode), mode & 0xFF); } void Assembler::pshufd(XMMRegister dst, Address src, int mode) { assert(isByte(mode), "invalid value"); NOT_LP64(assert(VM_Version::supports_sse2(), ""));
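The trailing byte of the pshufd/vpshufd emit_int24 calls above (mode & 0xFF) is a 2-bit-per-lane selector: bits 1:0 pick the source dword for destination lane 0, bits 3:2 for lane 1, and so on. A small scalar model of the 128-bit form, for illustration:

  #include <cstdint>

  // Each destination dword i copies source dword (imm8 >> (2*i)) & 3.
  static void pshufd_model(uint32_t dst[4], const uint32_t src[4], uint8_t imm8) {
    for (int i = 0; i < 4; i++) {
      dst[i] = src[(imm8 >> (2 * i)) & 3];
    }
  }
  // Example: imm8 = 0x1B (binary 00 01 10 11) reverses the four dwords.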
*** 4283,4295 **** void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { assert(isByte(mode), "invalid value"); NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x70); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(mode & 0xFF); } void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { assert(isByte(mode), "invalid value"); NOT_LP64(assert(VM_Version::supports_sse2(), "")); --- 4038,4048 ---- void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { assert(isByte(mode), "invalid value"); NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int24(0x70, (unsigned char)(0xC0 | encode), mode & 0xFF); } void Assembler::pshuflw(XMMRegister dst, Address src, int mode) { assert(isByte(mode), "invalid value"); NOT_LP64(assert(VM_Version::supports_sse2(), ""));
*** 4307,4362 **** assert(VM_Version::supports_evex(), "requires EVEX support"); assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x43); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8 & 0xFF); } void Assembler::psrldq(XMMRegister dst, int shift) { // Shift left 128 bit value in dst XMMRegister by shift number of bytes. NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x73); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift); } void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : vector_len == AVX_256bit ? VM_Version::supports_avx2() : vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x73); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::pslldq(XMMRegister dst, int shift) { // Shift left 128 bit value in dst XMMRegister by shift number of bytes. NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM7 is for /7 encoding: 66 0F 73 /7 ib int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x73); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift); } void Assembler::vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : vector_len == AVX_256bit ? VM_Version::supports_avx2() : vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(xmm7->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x73); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::ptest(XMMRegister dst, Address src) { assert(VM_Version::supports_sse4_1(), ""); assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); --- 4060,4105 ---- assert(VM_Version::supports_evex(), "requires EVEX support"); assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! 
emit_int24(0x43, (unsigned char)(0xC0 | encode), imm8 & 0xFF); } void Assembler::psrldq(XMMRegister dst, int shift) { // Shift left 128 bit value in dst XMMRegister by shift number of bytes. NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x73, (unsigned char)(0xC0 | encode), shift); } void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : vector_len == AVX_256bit ? VM_Version::supports_avx2() : vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::pslldq(XMMRegister dst, int shift) { // Shift left 128 bit value in dst XMMRegister by shift number of bytes. NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM7 is for /7 encoding: 66 0F 73 /7 ib int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x73, (unsigned char)(0xC0 | encode), shift); } void Assembler::vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(vector_len == AVX_128bit ? VM_Version::supports_avx() : vector_len == AVX_256bit ? VM_Version::supports_avx2() : vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, ""); InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(xmm7->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::ptest(XMMRegister dst, Address src) { assert(VM_Version::supports_sse4_1(), ""); assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
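psrldq and pslldq in the hunk above share opcode 0x73 and are told apart by the reg field of the ModRM byte (/3 versus /7), which is why xmm3 and xmm7 are passed as the "reg" operand even though no real register takes part; the byte shift count travels as the trailing immediate of emit_int24. A sketch of how the opcode extension ends up in the same ModRM byte, with a hypothetical helper name:

  #include <cstdint>

  // For "66 0F 73 /digit ib" encodings the ModRM reg field is an opcode
  // extension, not a register: /3 selects PSRLDQ, /7 selects PSLLDQ.
  static uint8_t modrm_with_ext(uint8_t ext /* 0..7 */, uint8_t rm_reg /* 0..7 */) {
    return (uint8_t)(0xC0 | (ext << 3) | rm_reg);
  }
  // pslldq xmm1, 4  ->  66 0F 73 F9 04   (F9 = 0xC0 | 7 << 3 | 1)
  // psrldq xmm1, 4  ->  66 0F 73 D9 04   (D9 = 0xC0 | 3 << 3 | 1)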
*** 4388,4399 **** void Assembler::vptest(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x17); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::punpcklbw(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); --- 4131,4141 ---- void Assembler::vptest(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x17, (unsigned char)(0xC0 | encode)); } void Assembler::punpcklbw(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
*** 4407,4418 **** void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x60); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::punpckldq(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes"); --- 4149,4159 ---- void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x60, (unsigned char)(0xC0 | encode)); } void Assembler::punpckldq(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
*** 4426,4446 **** void Assembler::punpckldq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x62); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x6C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::push(int32_t imm32) { // in 64bits we push 64bits onto the stack but only // take a 32bit immediate --- 4167,4185 ---- void Assembler::punpckldq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x62, (unsigned char)(0xC0 | encode)); } void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x6C, (unsigned char)(0xC0 | encode)); } void Assembler::push(int32_t imm32) { // in 64bits we push 64bits onto the stack but only // take a 32bit immediate
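punpckldq (0x62) and punpcklqdq (0x6C) in the hunk above interleave the low halves of the two operands; for punpcklqdq the low quadword of dst stays in place and the low quadword of src becomes the high quadword of dst. A scalar model, for illustration only:

  #include <cstdint>

  // PUNPCKLQDQ dst, src with the 128-bit operands viewed as two 64-bit lanes.
  static void punpcklqdq_model(uint64_t dst[2], const uint64_t src[2]) {
    dst[1] = src[0];   // high lane takes the low quadword of src
    // dst[0] keeps the low quadword of dst
  }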
*** 4448,4458 **** emit_int32(imm32); } void Assembler::push(Register src) { int encode = prefix_and_encode(src->encoding()); - emit_int8(0x50 | encode); } void Assembler::pushf() { emit_int8((unsigned char)0x9C); --- 4187,4196 ----
*** 4470,4549 **** void Assembler::rcll(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); int encode = prefix_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xD0 | encode)); } else { ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)0xD0 | encode); ! emit_int8(imm8); } } void Assembler::rcpps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x53); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::rcpss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x53); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::rdtsc() { ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0x31); } // copies data from [esi] to [edi] using rcx pointer sized words // generic void Assembler::rep_mov() { ! emit_int8((unsigned char)0xF3); // MOVSQ ! LP64_ONLY(prefix(REX_W)); ! emit_int8((unsigned char)0xA5); } // sets rcx bytes with rax, value at [edi] void Assembler::rep_stosb() { ! emit_int8((unsigned char)0xF3); // REP ! LP64_ONLY(prefix(REX_W)); ! emit_int8((unsigned char)0xAA); // STOSB } // sets rcx pointer sized words with rax, value at [edi] // generic void Assembler::rep_stos() { ! emit_int8((unsigned char)0xF3); // REP ! LP64_ONLY(prefix(REX_W)); // LP64:STOSQ, LP32:STOSD ! emit_int8((unsigned char)0xAB); } // scans rcx pointer sized words at [edi] for occurance of rax, // generic void Assembler::repne_scan() { // repne_scan - emit_int8((unsigned char)0xF2); // SCASQ ! LP64_ONLY(prefix(REX_W)); ! emit_int8((unsigned char)0xAF); } #ifdef _LP64 // scans rcx 4 byte words at [edi] for occurance of rax, // generic void Assembler::repne_scanl() { // repne_scan - emit_int8((unsigned char)0xF2); // SCASL ! emit_int8((unsigned char)0xAF); } #endif void Assembler::ret(int imm16) { if (imm16 == 0) { --- 4208,4281 ---- void Assembler::rcll(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); int encode = prefix_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int16((unsigned char)0xD1, (unsigned char)(0xD0 | encode)); } else { ! emit_int24((unsigned char)0xC1, (unsigned char)0xD0 | encode, imm8); } } void Assembler::rcpps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x53, (unsigned char)(0xC0 | encode)); } void Assembler::rcpss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x53, (unsigned char)(0xC0 | encode)); } void Assembler::rdtsc() { ! 
emit_int16((unsigned char)0x0F, (unsigned char)0x31); } // copies data from [esi] to [edi] using rcx pointer sized words // generic void Assembler::rep_mov() { ! // REP // MOVSQ ! LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xA5);) ! NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xA5);) } // sets rcx bytes with rax, value at [edi] void Assembler::rep_stosb() { ! // REP ! // STOSB ! LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAA);) ! NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAA);) } // sets rcx pointer sized words with rax, value at [edi] // generic void Assembler::rep_stos() { ! // REP ! // LP64:STOSQ, LP32:STOSD ! LP64_ONLY(emit_int24((unsigned char)0xF3, REX_W, (unsigned char)0xAB);) ! NOT_LP64( emit_int16((unsigned char)0xF3, (unsigned char)0xAB);) } // scans rcx pointer sized words at [edi] for occurance of rax, // generic void Assembler::repne_scan() { // repne_scan // SCASQ ! LP64_ONLY(emit_int24((unsigned char)0xF2, REX_W, (unsigned char)0xAF);) ! NOT_LP64( emit_int16((unsigned char)0xF2, (unsigned char)0xAF);) } #ifdef _LP64 // scans rcx 4 byte words at [edi] for occurance of rax, // generic void Assembler::repne_scanl() { // repne_scan // SCASL ! emit_int16((unsigned char)0xF2, (unsigned char)0xAF); } #endif void Assembler::ret(int imm16) { if (imm16 == 0) {
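The rep_mov/rep_stos/repne_scan forms above differ between 32- and 64-bit builds only in an extra REX.W prefix, chosen at compile time through LP64_ONLY/NOT_LP64. A small self-contained sketch of that selection follows; the macro stand-ins and the emit helper are assumptions for the demo, and 0x48 is the standard REX prefix byte with only the W bit set.

// Sketch (not HotSpot code): emit "rep movs" with a REX.W prefix only when
// building for 64-bit, mirroring the LP64_ONLY/NOT_LP64 selection above.
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <vector>

#ifdef __x86_64__
#define LP64_ONLY(x) x        // stand-ins for the HotSpot macros
#define NOT_LP64(x)
#else
#define LP64_ONLY(x)
#define NOT_LP64(x)  x
#endif

static std::vector<uint8_t> code;
static void emit(std::initializer_list<uint8_t> bytes) {
  code.insert(code.end(), bytes);
}

int main() {
  const uint8_t REP   = 0xF3;
  const uint8_t REX_W = 0x48;  // REX prefix, W bit set
  const uint8_t MOVS  = 0xA5;  // MOVSD/MOVSQ depending on operand size
  LP64_ONLY(emit({REP, REX_W, MOVS});)   // F3 48 A5 : rep movsq
  NOT_LP64( emit({REP, MOVS});)          // F3 A5    : rep movsd
  for (uint8_t b : code) printf("%02X ", b);
  printf("\n");
  return 0;
}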
*** 4564,4586 **** void Assembler::sarl(Register dst, int imm8) { int encode = prefix_and_encode(dst->encoding()); assert(isShiftCount(imm8), "illegal shift count"); if (imm8 == 1) { ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xF8 | encode)); } else { ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)(0xF8 | encode)); ! emit_int8(imm8); } } void Assembler::sarl(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xD3); ! emit_int8((unsigned char)(0xF8 | encode)); } void Assembler::sbbl(Address dst, int32_t imm32) { InstructionMark im(this); prefix(dst); --- 4296,4314 ---- void Assembler::sarl(Register dst, int imm8) { int encode = prefix_and_encode(dst->encoding()); assert(isShiftCount(imm8), "illegal shift count"); if (imm8 == 1) { ! emit_int16((unsigned char)0xD1, (unsigned char)(0xF8 | encode)); } else { ! emit_int24((unsigned char)0xC1, (unsigned char)(0xF8 | encode), imm8); } } void Assembler::sarl(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xD3, (unsigned char)(0xF8 | encode)); } void Assembler::sbbl(Address dst, int32_t imm32) { InstructionMark im(this); prefix(dst);
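sarl above (like shll and rcll elsewhere in this file) special-cases a shift count of 1 because x86 has a dedicated, one-byte-shorter encoding for it: D1 /7 instead of C1 /7 ib. A standalone sketch of the two byte patterns; the REX prefix produced by prefix_and_encode is omitted and the register number is supplied directly for illustration.

// Standalone illustration of the shift-count special case above.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint8_t> sarl_bytes(int reg_encoding, int imm8) {
  const uint8_t modrm = (uint8_t)(0xF8 | reg_encoding);  // mod=11, /7, rm=reg
  if (imm8 == 1) {
    return {0xD1, modrm};                    // sar r32, 1    (2 bytes)
  }
  return {0xC1, modrm, (uint8_t)imm8};       // sar r32, imm8 (3 bytes)
}

int main() {
  for (uint8_t b : sarl_bytes(0, 1)) printf("%02X ", b);   // D1 F8    = sar eax, 1
  printf("\n");
  for (uint8_t b : sarl_bytes(0, 4)) printf("%02X ", b);   // C1 F8 04 = sar eax, 4
  printf("\n");
  return 0;
}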
*** 4606,4772 **** } void Assembler::setb(Condition cc, Register dst) { assert(0 <= cc && cc < 16, "illegal cc"); int encode = prefix_and_encode(dst->encoding(), true); ! emit_int8(0x0F); ! emit_int8((unsigned char)0x90 | cc); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x3); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8((unsigned char)0x0E); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false); ! emit_int8((unsigned char)0xCC); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8((unsigned char)imm8); } void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int8((unsigned char)0xC8); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int8((unsigned char)0xC9); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int8((unsigned char)0xCA); ! emit_int8((unsigned char)(0xC0 | encode)); } // xmm0 is implicit additional source to this instruction. 
void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int8((unsigned char)0xCB); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int8((unsigned char)0xCC); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int8((unsigned char)0xCD); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::shll(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); int encode = prefix_and_encode(dst->encoding()); if (imm8 == 1 ) { ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xE0 | encode)); } else { ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)(0xE0 | encode)); ! emit_int8(imm8); } } void Assembler::shll(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xD3); ! emit_int8((unsigned char)(0xE0 | encode)); } void Assembler::shrl(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); int encode = prefix_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)(0xE8 | encode)); ! emit_int8(imm8); } void Assembler::shrl(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xD3); ! emit_int8((unsigned char)(0xE8 | encode)); } void Assembler::shldl(Register dst, Register src) { int encode = prefix_and_encode(src->encoding(), dst->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xA5); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::shldl(Register dst, Register src, int8_t imm8) { int encode = prefix_and_encode(src->encoding(), dst->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xA4); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::shrdl(Register dst, Register src) { int encode = prefix_and_encode(src->encoding(), dst->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAD); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::shrdl(Register dst, Register src, int8_t imm8) { int encode = prefix_and_encode(src->encoding(), dst->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAC); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } // copies a single word from [esi] to [edi] void Assembler::smovl() { emit_int8((unsigned char)0xA5); --- 4334,4465 ---- } void Assembler::setb(Condition cc, Register dst) { assert(0 <= cc && cc < 16, "illegal cc"); int encode = prefix_and_encode(dst->encoding(), true); ! emit_int24(0x0F, (unsigned char)0x90 | cc, (unsigned char)(0xC0 | encode)); } void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) { assert(VM_Version::supports_ssse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! 
emit_int24((unsigned char)0x0F, (unsigned char)(0xC0 | encode), imm8); } void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { assert(vector_len == AVX_128bit? VM_Version::supports_avx() : vector_len == AVX_256bit? VM_Version::supports_avx2() : 0, ""); InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24((unsigned char)0x0F, (unsigned char)(0xC0 | encode), imm8); } void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x3, (unsigned char)(0xC0 | encode), imm8); } void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24((unsigned char)0x0E, (unsigned char)(0xC0 | encode), imm8); } void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false); ! emit_int24((unsigned char)0xCC, (unsigned char)(0xC0 | encode), (unsigned char)imm8); } void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int16((unsigned char)0xC8, (unsigned char)(0xC0 | encode)); } void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int16((unsigned char)0xC9, (unsigned char)(0xC0 | encode)); } void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int16((unsigned char)0xCA, (unsigned char)(0xC0 | encode)); } // xmm0 is implicit additional source to this instruction. void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int16((unsigned char)0xCB, (unsigned char)(0xC0 | encode)); } void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! 
emit_int16((unsigned char)0xCC, (unsigned char)(0xC0 | encode)); } void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sha(), ""); int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false); ! emit_int16((unsigned char)0xCD, (unsigned char)(0xC0 | encode)); } void Assembler::shll(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); int encode = prefix_and_encode(dst->encoding()); if (imm8 == 1 ) { ! emit_int16((unsigned char)0xD1, (unsigned char)(0xE0 | encode)); } else { ! emit_int24((unsigned char)0xC1, (unsigned char)(0xE0 | encode), imm8); } } void Assembler::shll(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xD3, (unsigned char)(0xE0 | encode)); } void Assembler::shrl(Register dst, int imm8) { assert(isShiftCount(imm8), "illegal shift count"); int encode = prefix_and_encode(dst->encoding()); ! emit_int24((unsigned char)0xC1, (unsigned char)(0xE8 | encode), imm8); } void Assembler::shrl(Register dst) { int encode = prefix_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xD3, (unsigned char)(0xE8 | encode)); } void Assembler::shldl(Register dst, Register src) { int encode = prefix_and_encode(src->encoding(), dst->encoding()); ! emit_int24(0x0F, (unsigned char)0xA5, (unsigned char)(0xC0 | encode)); } void Assembler::shldl(Register dst, Register src, int8_t imm8) { int encode = prefix_and_encode(src->encoding(), dst->encoding()); ! emit_int32(0x0F, (unsigned char)0xA4, (unsigned char)(0xC0 | encode), imm8); } void Assembler::shrdl(Register dst, Register src) { int encode = prefix_and_encode(src->encoding(), dst->encoding()); ! emit_int24(0x0F, (unsigned char)0xAD, (unsigned char)(0xC0 | encode)); } void Assembler::shrdl(Register dst, Register src, int8_t imm8) { int encode = prefix_and_encode(src->encoding(), dst->encoding()); ! emit_int32(0x0F, (unsigned char)0xAC, (unsigned char)(0xC0 | encode), imm8); } // copies a single word from [esi] to [edi] void Assembler::smovl() { emit_int8((unsigned char)0xA5);
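Nearly every register-register form in these hunks ends with (unsigned char)(0xC0 | encode): a ModRM byte whose mod field is 11 (register-direct) and whose low six bits come from the reg/rm encoding returned by the prefix helpers. A short sketch of that field packing using the standard x86 ModRM layout; the shld operands in the example are arbitrary.

// Illustrative decomposition of the (0xC0 | encode) ModRM byte used above:
// mod = 11 selects register-direct operands, encode supplies reg<<3 | rm.
#include <cstdint>
#include <cstdio>

static uint8_t modrm_reg_reg(int reg, int rm) {   // both are 3-bit register numbers
  return (uint8_t)(0xC0 | (reg << 3) | rm);       // 11 reg rm
}

int main() {
  // shld ecx, edx, cl : opcode 0F A5 /r with reg=edx (2), rm=ecx (1)
  printf("%02X\n", modrm_reg_reg(2, 1));          // prints D1
  return 0;
}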
*** 4774,4786 **** void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x0B); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8((unsigned char)rmode); } void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) { assert(VM_Version::supports_sse4_1(), ""); InstructionMark im(this); --- 4467,4477 ---- void Assembler::roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x0B, (unsigned char)(0xC0 | encode), (unsigned char)rmode); } void Assembler::roundsd(XMMRegister dst, Address src, int32_t rmode) { assert(VM_Version::supports_sse4_1(), ""); InstructionMark im(this);
*** 4794,4805 **** void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x51); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::sqrtsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 4485,4495 ---- void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x51, (unsigned char)(0xC0 | encode)); } void Assembler::sqrtsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 4813,4824 **** void Assembler::sqrtss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x51); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::std() { emit_int8((unsigned char)0xFD); } --- 4503,4513 ---- void Assembler::sqrtss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x51, (unsigned char)(0xC0 | encode)); } void Assembler::std() { emit_int8((unsigned char)0xFD); }
*** 4843,4854 **** emit_operand(as_Register(3), dst); } else { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); prefix(dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); emit_operand(as_Register(3), dst); } } void Assembler::subl(Address dst, int32_t imm32) { --- 4532,4542 ---- emit_operand(as_Register(3), dst); } else { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); prefix(dst); ! emit_int16(0x0F, (unsigned char)0xAE); emit_operand(as_Register(3), dst); } } void Assembler::subl(Address dst, int32_t imm32) {
*** 4890,4901 **** void Assembler::subsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::subsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 4578,4588 ---- void Assembler::subsd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5C, (unsigned char)(0xC0 | encode)); } void Assembler::subsd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 4909,4920 **** void Assembler::subss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::subss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); --- 4596,4606 ---- void Assembler::subss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5C, (unsigned char)(0xC0 | encode)); } void Assembler::subss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this);
*** 4946,4957 **** int encode = dst->encoding(); if (encode == 0) { emit_int8((unsigned char)0xA9); } else { encode = prefix_and_encode(encode); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xC0 | encode)); } emit_int32(imm32); } void Assembler::testl(Register dst, Register src) { --- 4632,4642 ---- int encode = dst->encoding(); if (encode == 0) { emit_int8((unsigned char)0xA9); } else { encode = prefix_and_encode(encode); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xC0 | encode)); } emit_int32(imm32); } void Assembler::testl(Register dst, Register src) {
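testl above keeps a special case for register 0 (eax/rax): it has a dedicated A9 opcode that needs no ModRM byte, so the encoding is one byte shorter than the generic F7 /0 form. A standalone sketch of the two byte sequences; the helper name and register numbers are illustrative.

// Sketch of the eax short form taken in testl above.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint8_t> testl_bytes(int reg_encoding, uint32_t imm32) {
  std::vector<uint8_t> out;
  if (reg_encoding == 0) {
    out.push_back(0xA9);                            // test eax, imm32 (no ModRM)
  } else {
    out.push_back(0xF7);                            // test r/m32, imm32 (/0)
    out.push_back((uint8_t)(0xC0 | reg_encoding));
  }
  for (int i = 0; i < 4; i++) {
    out.push_back((uint8_t)(imm32 >> (8 * i)));     // little-endian immediate
  }
  return out;
}

int main() {
  for (uint8_t b : testl_bytes(0, 0x12345678)) printf("%02X ", b);  // A9 78 56 34 12
  printf("\n");
  for (uint8_t b : testl_bytes(1, 0x12345678)) printf("%02X ", b);  // F7 C1 78 56 34 12
  printf("\n");
  return 0;
}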
*** 4968,4989 **** void Assembler::tzcntl(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); emit_int8((unsigned char)0xF3); int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBC); ! emit_int8((unsigned char)0xC0 | encode); } void Assembler::tzcntq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); emit_int8((unsigned char)0xF3); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBC); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::ucomisd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 4653,4670 ---- void Assembler::tzcntl(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); emit_int8((unsigned char)0xF3); int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBC, (unsigned char)0xC0 | encode); } void Assembler::tzcntq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported"); emit_int8((unsigned char)0xF3); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBC, (unsigned char)(0xC0 | encode)); } void Assembler::ucomisd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
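In tzcntl/tzcntq above, the mandatory F3 prefix stays a lone emit_int8 while only the trailing 0F BC ModRM bytes are packed, because prefix_and_encode/prefixq_and_encode may emit a REX byte that has to sit between F3 and the 0F escape. A sketch of the resulting byte order for a 64-bit operand, worked out from the standard TZCNT encoding rather than taken from the patch.

// tzcnt rax, rcx : F3 48 0F BC C1  (F3 prefix, REX.W=0x48, ModRM 0xC0|reg<<3|rm)
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint8_t> bytes = {0xF3, 0x48, 0x0F, 0xBC, 0xC1};
  for (uint8_t b : bytes) printf("%02X ", b);
  printf("\n");
  return 0;
}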
*** 4998,5009 **** void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::ucomiss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); --- 4679,4689 ---- void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2E, (unsigned char)(0xC0 | encode)); } void Assembler::ucomiss(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this);
*** 5016,5074 **** void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::xabort(int8_t imm8) { ! emit_int8((unsigned char)0xC6); ! emit_int8((unsigned char)0xF8); ! emit_int8((unsigned char)(imm8 & 0xFF)); } void Assembler::xaddb(Address dst, Register src) { InstructionMark im(this); prefix(dst, src, true); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xC0); emit_operand(src, dst); } void Assembler::xaddw(Address dst, Register src) { InstructionMark im(this); emit_int8(0x66); prefix(dst, src); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xC1); emit_operand(src, dst); } void Assembler::xaddl(Address dst, Register src) { InstructionMark im(this); prefix(dst, src); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xC1); emit_operand(src, dst); } void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) { InstructionMark im(this); relocate(rtype); if (abort.is_bound()) { address entry = target(abort); assert(entry != NULL, "abort entry NULL"); intptr_t offset = entry - pc(); ! emit_int8((unsigned char)0xC7); ! emit_int8((unsigned char)0xF8); emit_int32(offset - 6); // 2 opcode + 4 address } else { abort.add_patch_at(code(), locator()); ! emit_int8((unsigned char)0xC7); ! emit_int8((unsigned char)0xF8); emit_int32(0); } } void Assembler::xchgb(Register dst, Address src) { // xchg --- 4696,4746 ---- void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2E, (unsigned char)(0xC0 | encode)); } void Assembler::xabort(int8_t imm8) { ! emit_int24((unsigned char)0xC6, (unsigned char)0xF8, (unsigned char)(imm8 & 0xFF)); } void Assembler::xaddb(Address dst, Register src) { InstructionMark im(this); prefix(dst, src, true); ! emit_int16(0x0F, (unsigned char)0xC0); emit_operand(src, dst); } void Assembler::xaddw(Address dst, Register src) { InstructionMark im(this); emit_int8(0x66); prefix(dst, src); ! emit_int16(0x0F, (unsigned char)0xC1); emit_operand(src, dst); } void Assembler::xaddl(Address dst, Register src) { InstructionMark im(this); prefix(dst, src); ! emit_int16(0x0F, (unsigned char)0xC1); emit_operand(src, dst); } void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) { InstructionMark im(this); relocate(rtype); if (abort.is_bound()) { address entry = target(abort); assert(entry != NULL, "abort entry NULL"); intptr_t offset = entry - pc(); ! emit_int16((unsigned char)0xC7, (unsigned char)0xF8); emit_int32(offset - 6); // 2 opcode + 4 address } else { abort.add_patch_at(code(), locator()); ! emit_int16((unsigned char)0xC7, (unsigned char)0xF8); emit_int32(0); } } void Assembler::xchgb(Register dst, Address src) { // xchg
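In xbegin above, the bound-label path stores offset - 6 because the XBEGIN rel32 is relative to the end of the 6-byte C7 F8 imm32 instruction, while offset was measured from its start (the "2 opcode + 4 address" comment). A worked example with made-up addresses:

// Worked example of the "offset - 6" adjustment; addresses are invented.
#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t pc     = 0x1000;                  // where C7 F8 starts being emitted
  uintptr_t target = 0x1040;                  // bound abort handler
  intptr_t  offset = target - pc;             // 0x40, as computed before emitting
  int32_t   rel32  = (int32_t)(offset - 6);   // encoded displacement: 0x3A
  // the CPU resumes at end-of-instruction + rel32 = (pc + 6) + 0x3A = 0x1040
  printf("rel32 = 0x%X, resolves to 0x%lX\n",
         rel32, (unsigned long)(pc + 6 + rel32));
  return 0;
}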
*** 5093,5116 **** emit_operand(dst, src); } void Assembler::xchgl(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int8((unsigned char)0x87); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::xend() { ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0x01); ! emit_int8((unsigned char)0xD5); } void Assembler::xgetbv() { ! emit_int8(0x0F); ! emit_int8(0x01); ! emit_int8((unsigned char)0xD0); } void Assembler::xorl(Register dst, int32_t imm32) { prefix(dst); emit_arith(0x81, 0xF0, dst, imm32); --- 4765,4783 ---- emit_operand(dst, src); } void Assembler::xchgl(Register dst, Register src) { int encode = prefix_and_encode(dst->encoding(), src->encoding()); ! emit_int16((unsigned char)0x87, (unsigned char)(0xC0 | encode)); } void Assembler::xend() { ! emit_int24((unsigned char)0x0F, (unsigned char)0x01, (unsigned char)0xD5); } void Assembler::xgetbv() { ! emit_int24(0x0F, 0x01, (unsigned char)0xD0); } void Assembler::xorl(Register dst, int32_t imm32) { prefix(dst); emit_arith(0x81, 0xF0, dst, imm32);
*** 5151,5162 **** void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x58); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 4818,4828 ---- void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x58, (unsigned char)(0xC0 | encode)); } void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5169,5180 **** void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x58); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 4835,4845 ---- void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x58, (unsigned char)(0xC0 | encode)); } void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5189,5200 **** void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 4854,4864 ---- void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5E, (unsigned char)(0xC0 | encode)); } void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5207,5234 **** void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { assert(VM_Version::supports_fma(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xB9); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) { assert(VM_Version::supports_fma(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xB9); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 4871,4895 ---- void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5E, (unsigned char)(0xC0 | encode)); } void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { assert(VM_Version::supports_fma(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xB9, (unsigned char)(0xC0 | encode)); } void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) { assert(VM_Version::supports_fma(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xB9, (unsigned char)(0xC0 | encode)); } void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5243,5254 **** void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x59); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 4904,4914 ---- void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x59, (unsigned char)(0xC0 | encode)); } void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5261,5272 **** void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x59); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 4921,4931 ---- void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x59, (unsigned char)(0xC0 | encode)); } void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5281,5292 **** void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 4940,4950 ---- void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5C, (unsigned char)(0xC0 | encode)); } void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5299,5310 **** void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5C); ! emit_int8((unsigned char)(0xC0 | encode)); } //====================VECTOR ARITHMETIC===================================== // Float-point vector arithmetic --- 4957,4967 ---- void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5C, (unsigned char)(0xC0 | encode)); } //====================VECTOR ARITHMETIC===================================== // Float-point vector arithmetic
*** 5312,5323 **** void Assembler::addpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x58); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::addpd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 4969,4979 ---- void Assembler::addpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x58, (unsigned char)(0xC0 | encode)); } void Assembler::addpd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 5332,5360 **** void Assembler::addps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x58); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x58); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x58); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 4988,5013 ---- void Assembler::addps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x58, (unsigned char)(0xC0 | encode)); } void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x58, (unsigned char)(0xC0 | encode)); } void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x58, (unsigned char)(0xC0 | encode)); } void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
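In the vector hunks here and below, the packed emit_int16 call covers only the opcode and ModRM; the preceding VEX prefix comes from vex_prefix_and_encode and never appears in the diff. For orientation, the full byte sequence for a 128-bit vaddps, hand-assembled from the standard AVX encoding (not taken from the patch):

// vaddps xmm1, xmm2, xmm3 -- hand-assembled for illustration:
//   C5 : two-byte VEX escape
//   E8 : R-bar=1, vvvv=~2 (second source xmm2), L=0 (128-bit), pp=00
//   58 : ADDPS opcode, as in the emit_int16 calls above
//   CB : ModRM 0xC0 | (1 << 3) | 3  (reg=xmm1, rm=xmm3)
#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t bytes[] = {0xC5, 0xE8, 0x58, 0xCB};
  for (uint8_t b : bytes) printf("%02X ", b);
  printf("\n");
  return 0;
}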
*** 5379,5415 **** void Assembler::subpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::subps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 5032,5064 ---- void Assembler::subpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5C, (unsigned char)(0xC0 | encode)); } void Assembler::subps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5C, (unsigned char)(0xC0 | encode)); } void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int16(0x5C, (unsigned char)(0xC0 | encode)); } void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5C, (unsigned char)(0xC0 | encode)); } void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5434,5445 **** void Assembler::mulpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x59); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::mulpd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 5083,5093 ---- void Assembler::mulpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x59, (unsigned char)(0xC0 | encode)); } void Assembler::mulpd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 5453,5481 **** void Assembler::mulps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x59); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x59); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x59); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 5101,5126 ---- void Assembler::mulps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x59, (unsigned char)(0xC0 | encode)); } void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x59, (unsigned char)(0xC0 | encode)); } void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x59, (unsigned char)(0xC0 | encode)); } void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5499,5518 **** void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) { assert(VM_Version::supports_fma(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xB8); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) { assert(VM_Version::supports_fma(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xB8); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { assert(VM_Version::supports_fma(), ""); InstructionMark im(this); --- 5144,5161 ---- void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) { assert(VM_Version::supports_fma(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xB8, (unsigned char)(0xC0 | encode)); } void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) { assert(VM_Version::supports_fma(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xB8, (unsigned char)(0xC0 | encode)); } void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { assert(VM_Version::supports_fma(), ""); InstructionMark im(this);
*** 5536,5572 ****
  void Assembler::divpd(XMMRegister dst, XMMRegister src) {
    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int8(0x5E);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::divps(XMMRegister dst, XMMRegister src) {
    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
!   emit_int8(0x5E);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int8(0x5E);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
!   emit_int8(0x5E);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionMark im(this);
--- 5179,5211 ----
  void Assembler::divpd(XMMRegister dst, XMMRegister src) {
    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int16(0x5E, (unsigned char)(0xC0 | encode));
  }

  void Assembler::divps(XMMRegister dst, XMMRegister src) {
    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
!   emit_int16(0x5E, (unsigned char)(0xC0 | encode));
  }

  void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int16(0x5E, (unsigned char)(0xC0 | encode));
  }

  void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
!   emit_int16(0x5E, (unsigned char)(0xC0 | encode));
  }

  void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionMark im(this);
*** 5590,5602 ****
  void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
!   emit_int8(0x09);
!   emit_int8((unsigned char)(0xC0 | encode));
!   emit_int8((unsigned char)(rmode));
  }

  void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionMark im(this);
--- 5229,5239 ----
  void Assembler::vroundpd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
!   emit_int24(0x09, (unsigned char)(0xC0 | encode), (unsigned char)(rmode));
  }

  void Assembler::vroundpd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
    assert(VM_Version::supports_avx(), "");
    InstructionMark im(this);
*** 5610,5622 ****
  void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
    assert(VM_Version::supports_evex(), "requires EVEX support");
    InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
!   emit_int8((unsigned char)0x09);
!   emit_int8((unsigned char)(0xC0 | encode));
!   emit_int8((unsigned char)(rmode));
  }

  void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
    assert(VM_Version::supports_evex(), "requires EVEX support");
    assert(dst != xnoreg, "sanity");
--- 5247,5257 ----
  void Assembler::vrndscalepd(XMMRegister dst, XMMRegister src, int32_t rmode, int vector_len) {
    assert(VM_Version::supports_evex(), "requires EVEX support");
    InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
!   emit_int24((unsigned char)0x09, (unsigned char)(0xC0 | encode), (unsigned char)(rmode));
  }

  void Assembler::vrndscalepd(XMMRegister dst, Address src, int32_t rmode, int vector_len) {
    assert(VM_Version::supports_evex(), "requires EVEX support");
    assert(dst != xnoreg, "sanity");
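vroundpd and vrndscalepd carry an 8-bit rounding-mode immediate after the ModRM byte, so their three single-byte emits collapse into one emit_int24(opcode, modrm, imm8) call. A minimal sketch of that three-byte tail, using made-up register numbers:

// Illustrative only: the "opcode, ModRM, imm8" tail that follows the VEX/EVEX prefix
// of a round instruction (opcode 0x09 /r ib, as in the hunks above).
#include <cstdint>
#include <cstdio>

static void emit_round_tail(uint8_t* out, int encode, int rmode) {
  out[0] = 0x09;                      // opcode byte
  out[1] = (uint8_t)(0xC0 | encode);  // register-register ModRM
  out[2] = (uint8_t)(rmode & 0xFF);   // rounding-mode immediate
}

int main() {
  uint8_t tail[3];
  int encode = (3 << 3) | 1;             // hypothetical reg/rm bits (xmm3, xmm1)
  emit_round_tail(tail, encode, 0x01);   // 0x01 chosen only as an example immediate
  printf("%02X %02X %02X\n", tail[0], tail[1], tail[2]);  // prints: 09 D9 01
  return 0;
}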
*** 5634,5645 **** void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x51); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 5269,5279 ---- void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x51, (unsigned char)(0xC0 | encode)); } void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5653,5664 **** void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x51); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 5287,5297 ---- void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x51, (unsigned char)(0xC0 | encode)); } void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5672,5691 **** void Assembler::andpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x54); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::andps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x54); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::andps(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); --- 5305,5322 ---- void Assembler::andpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x54, (unsigned char)(0xC0 | encode)); } void Assembler::andps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x54, (unsigned char)(0xC0 | encode)); } void Assembler::andps(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this);
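Every (0xC0 | encode) in these hunks forms the register-register ModRM byte: mod = 11 in the top two bits, the reg field in bits 5..3, and the rm field in bits 2..0, where encode already carries the combined reg/rm bits returned by the prefix-and-encode step. A self-contained illustration with arbitrary register numbers:

// Illustrative only: compose a register-register ModRM byte.
#include <cstdio>

static int modrm_reg_reg(int reg, int rm) {
  return 0xC0 | ((reg & 7) << 3) | (rm & 7);   // mod=11, reg, rm
}

int main() {
  int encode = ((1 & 7) << 3) | (2 & 7);       // what a prefix/encode helper might hand back
  printf("%02X\n", 0xC0 | encode);             // CA, the byte the assembler emits
  printf("%02X\n", modrm_reg_reg(1, 2));       // CA, built field by field
  return 0;
}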
*** 5710,5729 **** void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x54); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x54); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 5341,5358 ---- void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x54, (unsigned char)(0xC0 | encode)); } void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x54, (unsigned char)(0xC0 | encode)); } void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5757,5785 **** void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x14); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::xorpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x57); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::xorps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x57); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::xorpd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 5386,5411 ---- void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x14, (unsigned char)(0xC0 | encode)); } void Assembler::xorpd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x57, (unsigned char)(0xC0 | encode)); } void Assembler::xorps(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x57, (unsigned char)(0xC0 | encode)); } void Assembler::xorpd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 5804,5823 **** void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x57); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8(0x57); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this); --- 5430,5447 ---- void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x57, (unsigned char)(0xC0 | encode)); } void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int16(0x57, (unsigned char)(0xC0 | encode)); } void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); InstructionMark im(this);
*** 5843,5887 **** void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx() && (vector_len == 0) || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x01); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx() && (vector_len == 0) || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x02); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::paddb(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xFC); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::paddw(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xFD); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::paddd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xFE); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::paddd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 5467,5506 ---- void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx() && (vector_len == 0) || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x01, (unsigned char)(0xC0 | encode)); } void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx() && (vector_len == 0) || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! 
emit_int16(0x02, (unsigned char)(0xC0 | encode)); } void Assembler::paddb(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xFC, (unsigned char)(0xC0 | encode)); } void Assembler::paddw(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xFD, (unsigned char)(0xC0 | encode)); } void Assembler::paddd(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xFE, (unsigned char)(0xC0 | encode)); } void Assembler::paddd(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 5894,5954 **** void Assembler::paddq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD4); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::phaddw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x01); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::phaddd(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x02); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xFC); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xFD); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xFE); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD4); ! 
emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this); --- 5513,5566 ---- void Assembler::paddq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD4, (unsigned char)(0xC0 | encode)); } void Assembler::phaddw(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x01, (unsigned char)(0xC0 | encode)); } void Assembler::phaddd(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse3(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x02, (unsigned char)(0xC0 | encode)); } void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xFC, (unsigned char)(0xC0 | encode)); } void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xFD, (unsigned char)(0xC0 | encode)); } void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xFE, (unsigned char)(0xC0 | encode)); } void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD4, (unsigned char)(0xC0 | encode)); } void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this);
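The packed-add forms above differ only in their opcode byte, 0xFC (paddb), 0xFD (paddw), 0xFE (paddd) and 0xD4 (paddq), while the (0xC0 | encode) tail is identical, which is what makes the uniform emit_int16(opcode, modrm) rewrite possible. A small table-driven illustration (not how the assembler itself is structured):

// Illustrative only: the two-byte tails of the packed-add family share one ModRM.
#include <cstdio>

int main() {
  struct { const char* name; int opcode; } padd[] = {
    { "paddb", 0xFC }, { "paddw", 0xFD }, { "paddd", 0xFE }, { "paddq", 0xD4 },
  };
  int encode = (0 << 3) | 1;   // hypothetical reg/rm bits (xmm0, xmm1)
  for (const auto& op : padd) {
    printf("%s tail: %02X %02X\n", op.name, op.opcode, 0xC0 | encode);
  }
  return 0;
}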
*** 5992,6018 **** void Assembler::psubb(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF8); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::psubw(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF9); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::psubd(XMMRegister dst, XMMRegister src) { InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xFA); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::psubq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); --- 5604,5627 ---- void Assembler::psubb(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF8, (unsigned char)(0xC0 | encode)); } void Assembler::psubw(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF9, (unsigned char)(0xC0 | encode)); } void Assembler::psubd(XMMRegister dst, XMMRegister src) { InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xFA, (unsigned char)(0xC0 | encode)); } void Assembler::psubq(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
*** 6024,6060 **** void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF8); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF9); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xFA); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xFB); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this); --- 5633,5665 ---- void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF8, (unsigned char)(0xC0 | encode)); } void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF9, (unsigned char)(0xC0 | encode)); } void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int16((unsigned char)0xFA, (unsigned char)(0xC0 | encode)); } void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xFB, (unsigned char)(0xC0 | encode)); } void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this);
*** 6098,6142 **** void Assembler::pmullw(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD5); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pmulld(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x40); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD5); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x40); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 2, "requires some form of EVEX"); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x40); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this); --- 5703,5742 ---- void Assembler::pmullw(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD5, (unsigned char)(0xC0 | encode)); } void Assembler::pmulld(XMMRegister dst, XMMRegister src) { assert(VM_Version::supports_sse4_1(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! 
emit_int16(0x40, (unsigned char)(0xC0 | encode)); } void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD5, (unsigned char)(0xC0 | encode)); } void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x40, (unsigned char)(0xC0 | encode)); } void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 2, "requires some form of EVEX"); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x40, (unsigned char)(0xC0 | encode)); } void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this);
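In the shift-by-immediate hunks that follow, comments such as "XMM6 is for /6 encoding: 66 0F 71 /6 ib" mean the ModRM reg field carries an opcode extension: the digit (6 for the left shifts, 2 for the logical right shifts, 4 for the arithmetic right shifts) occupies bits 5..3, the destination register goes in the rm field, and the shift count follows as an 8-bit immediate. Passing xmm6/xmm2/xmm4 as the first register to the prefix-and-encode step produces exactly that byte, and the tail becomes one emit_int24(opcode, modrm, shift & 0xFF). A standalone sketch with an arbitrary destination register:

// Illustrative only: the 3-byte tail of a shift-by-immediate such as "66 0F 71 /6 ib"
// (psllw xmm, imm8). The /6 digit occupies the ModRM reg field.
#include <cstdio>

static void shift_imm_tail(int opcode, int digit, int dst, int shift, unsigned char out[3]) {
  out[0] = (unsigned char)opcode;
  out[1] = (unsigned char)(0xC0 | ((digit & 7) << 3) | (dst & 7));
  out[2] = (unsigned char)(shift & 0xFF);
}

int main() {
  unsigned char tail[3];
  shift_imm_tail(0x71, 6, /* hypothetical dst = xmm3 */ 3, 5, tail);
  printf("%02X %02X %02X\n", tail[0], tail[1], tail[2]);   // prints: 71 F3 05
  return 0;
}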
*** 6172,6430 **** void Assembler::psllw(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 71 /6 ib int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x71); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::pslld(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 72 /6 ib int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x72); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::psllq(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 73 /6 ib int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x73); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::psllw(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF1); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pslld(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF2); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::psllq(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF3); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 71 /6 ib int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x71); ! emit_int8((unsigned char)(0xC0 | encode)); ! 
emit_int8(shift & 0xFF); } void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 72 /6 ib int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x72); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); // XMM6 is for /6 encoding: 66 0F 73 /6 ib int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x73); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF1); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF2); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xF3); ! emit_int8((unsigned char)(0xC0 | encode)); } // Shift packed integers logically right by specified number of bits. void Assembler::psrlw(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM2 is for /2 encoding: 66 0F 71 /2 ib int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x71); ! emit_int8((unsigned char)(0xC0 | encode)); ! 
emit_int8(shift & 0xFF); } void Assembler::psrld(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM2 is for /2 encoding: 66 0F 72 /2 ib int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x72); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::psrlq(XMMRegister dst, int shift) { // Do not confuse it with psrldq SSE2 instruction which // shifts 128 bit value in xmm register by number of bytes. NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); // XMM2 is for /2 encoding: 66 0F 73 /2 ib int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x73); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::psrlw(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::psrld(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD2); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::psrlq(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD3); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM2 is for /2 encoding: 66 0F 71 /2 ib int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x71); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM2 is for /2 encoding: 66 0F 72 /2 ib int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x72); ! emit_int8((unsigned char)(0xC0 | encode)); ! 
emit_int8(shift & 0xFF); } void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); // XMM2 is for /2 encoding: 66 0F 73 /2 ib int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x73); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD2); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xD3); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x10); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x12); ! emit_int8((unsigned char)(0xC0 | encode)); } // Shift packed integers arithmetically right by specified number of bits. 
void Assembler::psraw(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM4 is for /4 encoding: 66 0F 71 /4 ib int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x71); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::psrad(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); --- 5772,5990 ---- void Assembler::psllw(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 71 /6 ib int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::pslld(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 72 /6 ib int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::psllq(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 73 /6 ib int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::psllw(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF1, (unsigned char)(0xC0 | encode)); } void Assembler::pslld(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF2, (unsigned char)(0xC0 | encode)); } void Assembler::psllq(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode)); } void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 71 /6 ib int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM6 is for /6 encoding: 66 0F 72 /6 ib int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); // XMM6 is for /6 encoding: 66 0F 73 /6 ib int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF1, (unsigned char)(0xC0 | encode)); } void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF2, (unsigned char)(0xC0 | encode)); } void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode)); } // Shift packed integers logically right by specified number of bits. 
void Assembler::psrlw(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM2 is for /2 encoding: 66 0F 71 /2 ib int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::psrld(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM2 is for /2 encoding: 66 0F 72 /2 ib int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::psrlq(XMMRegister dst, int shift) { // Do not confuse it with psrldq SSE2 instruction which // shifts 128 bit value in xmm register by number of bytes. NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); // XMM2 is for /2 encoding: 66 0F 73 /2 ib int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::psrlw(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD1, (unsigned char)(0xC0 | encode)); } void Assembler::psrld(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD2, (unsigned char)(0xC0 | encode)); } void Assembler::psrlq(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD3, (unsigned char)(0xC0 | encode)); } void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM2 is for /2 encoding: 66 0F 71 /2 ib int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM2 is for /2 encoding: 66 0F 72 /2 ib int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); // XMM2 is for /2 encoding: 66 0F 73 /2 ib int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x73, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD1, (unsigned char)(0xC0 | encode)); } void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD2, (unsigned char)(0xC0 | encode)); } void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xD3, (unsigned char)(0xC0 | encode)); } void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x10, (unsigned char)(0xC0 | encode)); } void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! 
emit_int16(0x12, (unsigned char)(0xC0 | encode)); } // Shift packed integers arithmetically right by specified number of bits. void Assembler::psraw(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM4 is for /4 encoding: 66 0F 71 /4 ib int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::psrad(XMMRegister dst, int shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
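The rewrite in the hunks above and below is mechanical: every run of two or three consecutive emit_int8 calls is folded into a single emit_int16 or emit_int24 call with the bytes in the same order (typically opcode, ModRM byte, and optional 8-bit immediate). As a reading aid, here is a minimal C++ sketch of helpers with that same observable effect; it only assumes the bytes land in argument order, and the real emit_int16/emit_int24 declared elsewhere in the assembler may well batch the store as one wider write.

    #include <cstdint>
    #include <vector>

    // Illustrative sketch only, not the HotSpot code buffer.
    struct ToyCodeBuffer {
      std::vector<uint8_t> bytes;

      void emit_int8(int b) { bytes.push_back(static_cast<uint8_t>(b)); }

      // Same observable effect as two consecutive emit_int8 calls.
      void emit_int16(int b0, int b1) { emit_int8(b0); emit_int8(b1); }

      // Same observable effect as three consecutive emit_int8 calls,
      // e.g. opcode, ModRM byte, 8-bit immediate.
      void emit_int24(int b0, int b1, int b2) { emit_int8(b0); emit_int8(b1); emit_int8(b2); }
    };

Collapsing the calls keeps each instruction's encoding on one source line and gives the emitter one call per byte group instead of one per byte, which appears to be the point of the cleanup.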
*** 6437,6530 **** void Assembler::psraw(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xE1); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::psrad(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xE2); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM4 is for /4 encoding: 66 0F 71 /4 ib int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x71); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM4 is for /4 encoding: 66 0F 71 /4 ib int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x72); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(shift & 0xFF); } void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xE1); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xE2); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 2, "requires AVX512"); assert ((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512vl"); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0x72); ! emit_int8((unsigned char)(0xC0 | encode)); ! 
emit_int8(shift & 0xFF); } void Assembler::evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 2, "requires AVX512"); assert ((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512vl"); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xE2); ! emit_int8((unsigned char)(0xC0 | encode)); } // logical operations packed integers void Assembler::pand(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xDB); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xDB); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this); --- 5997,6077 ---- void Assembler::psraw(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xE1, (unsigned char)(0xC0 | encode)); } void Assembler::psrad(XMMRegister dst, XMMRegister shift) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xE2, (unsigned char)(0xC0 | encode)); } void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); // XMM4 is for /4 encoding: 66 0F 71 /4 ib int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24(0x71, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); // XMM4 is for /4 encoding: 66 0F 71 /4 ib int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int24(0x72, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xE1, (unsigned char)(0xC0 | encode)); } void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xE2, (unsigned char)(0xC0 | encode)); } void Assembler::evpsraq(XMMRegister dst, XMMRegister src, int shift, int vector_len) { assert(UseAVX > 2, "requires AVX512"); assert ((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512vl"); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24((unsigned char)0x72, (unsigned char)(0xC0 | encode), shift & 0xFF); } void Assembler::evpsraq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) { assert(UseAVX > 2, "requires AVX512"); assert ((VM_Version::supports_avx512vl() || vector_len == 2), "requires AVX512vl"); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xE2, (unsigned char)(0xC0 | encode)); } // logical operations packed integers void Assembler::pand(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xDB, (unsigned char)(0xC0 | encode)); } void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xDB, (unsigned char)(0xC0 | encode)); } void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this);
*** 6537,6548 ****
  void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_evex(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int8((unsigned char)0xDB);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
    assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
--- 6084,6094 ----
  void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_evex(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int16((unsigned char)0xDB, (unsigned char)(0xC0 | encode));
  }

  void Assembler::vpshldvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
    assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
*** 6555,6600 ****
  void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
    assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
!   emit_int8(0x73);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::pandn(XMMRegister dst, XMMRegister src) {
    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int8((unsigned char)0xDF);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(UseAVX > 0, "requires some form of AVX");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int8((unsigned char)0xDF);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::por(XMMRegister dst, XMMRegister src) {
    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int8((unsigned char)0xEB);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(UseAVX > 0, "requires some form of AVX");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int8((unsigned char)0xEB);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    assert(UseAVX > 0, "requires some form of AVX");
    InstructionMark im(this);
--- 6101,6141 ----
  void Assembler::vpshrdvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
    assert(VM_Version::supports_avx512_vbmi2(), "requires vbmi2");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_is_evex_instruction();
    int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
!   emit_int16(0x73, (unsigned char)(0xC0 | encode));
  }

  void Assembler::pandn(XMMRegister dst, XMMRegister src) {
    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int16((unsigned char)0xDF, (unsigned char)(0xC0 | encode));
  }

  void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(UseAVX > 0, "requires some form of AVX");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int16((unsigned char)0xDF, (unsigned char)(0xC0 | encode));
  }

  void Assembler::por(XMMRegister dst, XMMRegister src) {
    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int16((unsigned char)0xEB, (unsigned char)(0xC0 | encode));
  }

  void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    assert(UseAVX > 0, "requires some form of AVX");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
!   emit_int16((unsigned char)0xEB, (unsigned char)(0xC0 | encode));
  }

  void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    assert(UseAVX > 0, "requires some form of AVX");
    InstructionMark im(this);
*** 6607,6635 **** void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xEB); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::pxor(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xEF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xEF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this); --- 6148,6173 ---- void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xEB, (unsigned char)(0xC0 | encode)); } void Assembler::pxor(XMMRegister dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xEF, (unsigned char)(0xC0 | encode)); } void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16((unsigned char)0xEF, (unsigned char)(0xC0 | encode)); } void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { assert(UseAVX > 0, "requires some form of AVX"); InstructionMark im(this);
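A note on the recurring (unsigned char)(0xC0 | encode) operand byte in these hunks: 0xC0 sets the ModRM mod field to 0b11 (register-direct), and the value returned by the *_prefix_and_encode helpers is assumed here to already carry the reg and rm fields in its low six bits, so the OR produces the final ModRM byte. A small illustrative helper, not taken from the HotSpot sources:

    #include <cstdint>

    // reg and rm are the low three bits of the two register encodings;
    // 0xC0 selects the register-direct addressing mode (mod = 0b11).
    static inline uint8_t modrm_reg_direct(int reg, int rm) {
      return static_cast<uint8_t>(0xC0 | ((reg & 7) << 3) | (rm & 7));
    }

For example, modrm_reg_direct(1, 2) yields 0xCA, the kind of byte a register-to-register form above would emit right after its opcode.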
*** 6667,6681 ****
  void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    assert(VM_Version::supports_avx2(), "");
    assert(imm8 <= 0x01, "imm8: %u", imm8);
    InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
!   emit_int8(0x38);
!   emit_int8((unsigned char)(0xC0 | encode));
    // 0x00 - insert into lower 128 bits
    // 0x01 - insert into upper 128 bits
!   emit_int8(imm8 & 0x01);
  }

  void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    assert(VM_Version::supports_avx2(), "");
    assert(dst != xnoreg, "sanity");
--- 6205,6218 ----
  void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    assert(VM_Version::supports_avx2(), "");
    assert(imm8 <= 0x01, "imm8: %u", imm8);
    InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
!   // last byte:
    // 0x00 - insert into lower 128 bits
    // 0x01 - insert into upper 128 bits
!   emit_int24(0x38, (unsigned char)(0xC0 | encode), imm8 & 0x01);
  }

  void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    assert(VM_Version::supports_avx2(), "");
    assert(dst != xnoreg, "sanity");
*** 6695,6711 **** assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x38); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - insert into q0 128 bits (0..127) // 0x01 - insert into q1 128 bits (128..255) // 0x02 - insert into q2 128 bits (256..383) // 0x03 - insert into q3 128 bits (384..511) ! emit_int8(imm8 & 0x03); } void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(dst != xnoreg, "sanity"); --- 6232,6247 ---- assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - insert into q0 128 bits (0..127) // 0x01 - insert into q1 128 bits (128..255) // 0x02 - insert into q2 128 bits (256..383) // 0x03 - insert into q3 128 bits (384..511) ! emit_int24(0x38, (unsigned char)(0xC0 | encode), imm8 & 0x03); } void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(dst != xnoreg, "sanity");
*** 6728,6757 **** assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x3A); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - insert into lower 256 bits // 0x01 - insert into upper 256 bits ! emit_int8(imm8 & 0x01); } // vinsertf forms void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x18); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - insert into lower 128 bits // 0x01 - insert into upper 128 bits ! emit_int8(imm8 & 0x01); } void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(dst != xnoreg, "sanity"); --- 6264,6291 ---- assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! //imm8: // 0x00 - insert into lower 256 bits // 0x01 - insert into upper 256 bits ! emit_int24(0x3A, (unsigned char)(0xC0 | encode), imm8 & 0x01); } // vinsertf forms void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - insert into lower 128 bits // 0x01 - insert into upper 128 bits ! emit_int24(0x18, (unsigned char)(0xC0 | encode), imm8 & 0x01); } void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(dst != xnoreg, "sanity");
*** 6770,6786 **** void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx2(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x18); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - insert into q0 128 bits (0..127) // 0x01 - insert into q1 128 bits (128..255) // 0x02 - insert into q0 128 bits (256..383) // 0x03 - insert into q1 128 bits (384..512) ! emit_int8(imm8 & 0x03); } void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(dst != xnoreg, "sanity"); --- 6304,6319 ---- void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx2(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - insert into q0 128 bits (0..127) // 0x01 - insert into q1 128 bits (128..255) // 0x02 - insert into q0 128 bits (256..383) // 0x03 - insert into q1 128 bits (384..512) ! emit_int24(0x18, (unsigned char)(0xC0 | encode), imm8 & 0x03); } void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(dst != xnoreg, "sanity");
*** 6802,6816 **** assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x1A); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - insert into lower 256 bits // 0x01 - insert into upper 256 bits ! emit_int8(imm8 & 0x01); } void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(dst != xnoreg, "sanity"); --- 6335,6348 ---- assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - insert into lower 256 bits // 0x01 - insert into upper 256 bits ! emit_int24(0x1A, (unsigned char)(0xC0 | encode), imm8 & 0x01); } void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(dst != xnoreg, "sanity");
*** 6833,6847 **** void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx2(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x39); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - extract from lower 128 bits // 0x01 - extract from upper 128 bits ! emit_int8(imm8 & 0x01); } void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx2(), ""); assert(src != xnoreg, "sanity"); --- 6365,6378 ---- void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx2(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - extract from lower 128 bits // 0x01 - extract from upper 128 bits ! emit_int24(0x39, (unsigned char)(0xC0 | encode), imm8 & 0x01); } void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx2(), ""); assert(src != xnoreg, "sanity");
*** 6862,6878 **** assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x39); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - extract from bits 127:0 // 0x01 - extract from bits 255:128 // 0x02 - extract from bits 383:256 // 0x03 - extract from bits 511:384 ! emit_int8(imm8 & 0x03); } void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(src != xnoreg, "sanity"); --- 6393,6408 ---- assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - extract from bits 127:0 // 0x01 - extract from bits 255:128 // 0x02 - extract from bits 383:256 // 0x03 - extract from bits 511:384 ! emit_int24(0x39, (unsigned char)(0xC0 | encode), imm8 & 0x03); } void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(src != xnoreg, "sanity");
*** 6896,6925 **** assert(VM_Version::supports_avx512dq(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x39); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - extract from bits 127:0 // 0x01 - extract from bits 255:128 // 0x02 - extract from bits 383:256 // 0x03 - extract from bits 511:384 ! emit_int8(imm8 & 0x03); } void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x3B); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - extract from lower 256 bits // 0x01 - extract from upper 256 bits ! emit_int8(imm8 & 0x01); } void Assembler::vextracti64x4(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(src != xnoreg, "sanity"); --- 6426,6453 ---- assert(VM_Version::supports_avx512dq(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - extract from bits 127:0 // 0x01 - extract from bits 255:128 // 0x02 - extract from bits 383:256 // 0x03 - extract from bits 511:384 ! emit_int24(0x39, (unsigned char)(0xC0 | encode), imm8 & 0x03); } void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - extract from lower 256 bits // 0x01 - extract from upper 256 bits ! emit_int24(0x3B, (unsigned char)(0xC0 | encode), imm8 & 0x01); } void Assembler::vextracti64x4(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(src != xnoreg, "sanity");
*** 6941,6955 **** void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x19); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - extract from lower 128 bits // 0x01 - extract from upper 128 bits ! emit_int8(imm8 & 0x01); } void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(src != xnoreg, "sanity"); --- 6469,6482 ---- void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - extract from lower 128 bits // 0x01 - extract from upper 128 bits ! emit_int24(0x19, (unsigned char)(0xC0 | encode), imm8 & 0x01); } void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_avx(), ""); assert(src != xnoreg, "sanity");
*** 6970,6986 **** assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x19); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - extract from bits 127:0 // 0x01 - extract from bits 255:128 // 0x02 - extract from bits 383:256 // 0x03 - extract from bits 511:384 ! emit_int8(imm8 & 0x03); } void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(src != xnoreg, "sanity"); --- 6497,6512 ---- assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - extract from bits 127:0 // 0x01 - extract from bits 255:128 // 0x02 - extract from bits 383:256 // 0x03 - extract from bits 511:384 ! emit_int24(0x19, (unsigned char)(0xC0 | encode), imm8 & 0x03); } void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(src != xnoreg, "sanity");
*** 7004,7033 **** assert(VM_Version::supports_avx512dq(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x19); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - extract from bits 127:0 // 0x01 - extract from bits 255:128 // 0x02 - extract from bits 383:256 // 0x03 - extract from bits 511:384 ! emit_int8(imm8 & 0x03); } void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x1B); ! emit_int8((unsigned char)(0xC0 | encode)); // 0x00 - extract from lower 256 bits // 0x01 - extract from upper 256 bits ! emit_int8(imm8 & 0x01); } void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(src != xnoreg, "sanity"); --- 6530,6557 ---- assert(VM_Version::supports_avx512dq(), ""); assert(imm8 <= 0x03, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - extract from bits 127:0 // 0x01 - extract from bits 255:128 // 0x02 - extract from bits 383:256 // 0x03 - extract from bits 511:384 ! emit_int24(0x19, (unsigned char)(0xC0 | encode), imm8 & 0x03); } void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(imm8 <= 0x01, "imm8: %u", imm8); InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! // imm8: // 0x00 - extract from lower 256 bits // 0x01 - extract from upper 256 bits ! emit_int24(0x1B, (unsigned char)(0xC0 | encode), imm8 & 0x01); } void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) { assert(VM_Version::supports_evex(), ""); assert(src != xnoreg, "sanity");
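The imm8 comments in the 128-bit insert/extract hunks above all describe the same lane selection: the masked immediate picks which 128-bit slice of the wider register is read or written, with lane i covering bits 128*i + 127 down to 128*i. A hypothetical helper that states that mapping (illustration only, not part of the patch):

    #include <utility>

    // imm8 is masked exactly as in the source above (imm8 & 0x03).
    static inline std::pair<int, int> lane128_bits(int imm8) {
      int lo = 128 * (imm8 & 0x03);
      return std::make_pair(lo, lo + 127);   // e.g. imm8 == 2 -> bits 383:256
    }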
*** 7048,7059 ****
  // duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
  void Assembler::vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx2(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
!   emit_int8(0x78);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::vpbroadcastb(XMMRegister dst, Address src, int vector_len) {
    assert(VM_Version::supports_avx2(), "");
    assert(dst != xnoreg, "sanity");
--- 6572,6582 ----
  // duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
  void Assembler::vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx2(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
!   emit_int16(0x78, (unsigned char)(0xC0 | encode));
  }

  void Assembler::vpbroadcastb(XMMRegister dst, Address src, int vector_len) {
    assert(VM_Version::supports_avx2(), "");
    assert(dst != xnoreg, "sanity");
*** 7069,7080 **** // duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x79); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx2(), ""); assert(dst != xnoreg, "sanity"); --- 6592,6602 ---- // duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x79, (unsigned char)(0xC0 | encode)); } void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx2(), ""); assert(dst != xnoreg, "sanity");
*** 7092,7103 **** // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) { assert(UseAVX >= 2, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x58); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx2(), ""); assert(dst != xnoreg, "sanity"); --- 6614,6624 ---- // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) { assert(UseAVX >= 2, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x58, (unsigned char)(0xC0 | encode)); } void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx2(), ""); assert(dst != xnoreg, "sanity");
*** 7114,7125 ****
  void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx2(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
!   emit_int8(0x59);
!   emit_int8((unsigned char)(0xC0 | encode));
  }

  void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
    assert(VM_Version::supports_avx2(), "");
    assert(dst != xnoreg, "sanity");
--- 6635,6645 ----
  void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
    assert(VM_Version::supports_avx2(), "");
    InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
    attributes.set_rex_vex_w_reverted();
    int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
!   emit_int16(0x59, (unsigned char)(0xC0 | encode));
  }

  void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
    assert(VM_Version::supports_avx2(), "");
    assert(dst != xnoreg, "sanity");
*** 7136,7147 **** assert(vector_len != Assembler::AVX_128bit, ""); assert(VM_Version::supports_avx512dq(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x5A); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) { assert(vector_len != Assembler::AVX_128bit, ""); assert(VM_Version::supports_avx512dq(), ""); --- 6656,6666 ---- assert(vector_len != Assembler::AVX_128bit, ""); assert(VM_Version::supports_avx512dq(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x5A, (unsigned char)(0xC0 | encode)); } void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) { assert(vector_len != Assembler::AVX_128bit, ""); assert(VM_Version::supports_avx512dq(), "");
*** 7161,7172 **** // duplicate single precision data from src into programmed locations in dest : requires AVX512VL void Assembler::vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x18); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vbroadcastss(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(dst != xnoreg, "sanity"); --- 6680,6690 ---- // duplicate single precision data from src into programmed locations in dest : requires AVX512VL void Assembler::vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x18, (unsigned char)(0xC0 | encode)); } void Assembler::vbroadcastss(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(dst != xnoreg, "sanity");
*** 7184,7195 **** assert(VM_Version::supports_avx2(), ""); assert(vector_len == AVX_256bit || vector_len == AVX_512bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x19); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vbroadcastsd(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len == AVX_256bit || vector_len == AVX_512bit, ""); --- 6702,6712 ---- assert(VM_Version::supports_avx2(), ""); assert(vector_len == AVX_256bit || vector_len == AVX_512bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x19, (unsigned char)(0xC0 | encode)); } void Assembler::vbroadcastsd(XMMRegister dst, Address src, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len == AVX_256bit || vector_len == AVX_512bit, "");
*** 7211,7252 **** void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x7A); ! emit_int8((unsigned char)(0xC0 | encode)); } // duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x7B); ! emit_int8((unsigned char)(0xC0 | encode)); } // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x7C); ! emit_int8((unsigned char)(0xC0 | encode)); } // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8(0x7C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); assert(dst != xnoreg, "sanity"); InstructionMark im(this); --- 6728,6765 ---- void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x7A, (unsigned char)(0xC0 | encode)); } // duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_avx512bw(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! 
emit_int16(0x7B, (unsigned char)(0xC0 | encode)); } // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x7C, (unsigned char)(0xC0 | encode)); } // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) { assert(VM_Version::supports_evex(), ""); InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16(0x7C, (unsigned char)(0xC0 | encode)); } void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) { assert(VM_Version::supports_evex(), ""); assert(dst != xnoreg, "sanity"); InstructionMark im(this);
*** 7263,7295 **** // Carry-Less Multiplication Quadword void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) { assert(VM_Version::supports_clmul(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x44); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8((unsigned char)mask); } // Carry-Less Multiplication Quadword void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x44); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8((unsigned char)mask); } void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) { assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8(0x44); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8((unsigned char)mask); } void Assembler::vzeroupper_uncached() { if (VM_Version::supports_vzeroupper()) { InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); --- 6776,6802 ---- // Carry-Less Multiplication Quadword void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) { assert(VM_Version::supports_clmul(), ""); InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x44, (unsigned char)(0xC0 | encode), (unsigned char)mask); } // Carry-Less Multiplication Quadword void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int24(0x44, (unsigned char)(0xC0 | encode), (unsigned char)mask); } void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) { assert(VM_Version::supports_avx512_vpclmulqdq(), "Requires vector carryless multiplication support"); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true); attributes.set_is_evex_instruction(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! 
emit_int24(0x44, (unsigned char)(0xC0 | encode), (unsigned char)mask); } void Assembler::vzeroupper_uncached() { if (VM_Version::supports_vzeroupper()) { InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
*** 7306,7317 **** } void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) { // NO PREFIX AS NEVER 64BIT InstructionMark im(this); ! emit_int8((unsigned char)0x81); ! emit_int8((unsigned char)(0xF8 | src1->encoding())); emit_data(imm32, rspec, 0); } void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) { // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs --- 6813,6823 ---- } void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) { // NO PREFIX AS NEVER 64BIT InstructionMark im(this); ! emit_int16((unsigned char)0x81, (unsigned char)(0xF8 | src1->encoding())); emit_data(imm32, rspec, 0); } void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) { // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs
*** 7323,7335 **** // The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax, // and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise. void Assembler::cmpxchg8(Address adr) { ! InstructionMark im(this); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xC7); emit_operand(rcx, adr); } void Assembler::decl(Register dst) { // Don't use it directly. Use MacroAssembler::decrementl() instead. --- 6829,6840 ---- // The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax, // and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise. void Assembler::cmpxchg8(Address adr) { ! InstructionMark im(this); ! emit_int16(0x0F, (unsigned char)0xC7); emit_operand(rcx, adr); } void Assembler::decl(Register dst) { // Don't use it directly. Use MacroAssembler::decrementl() instead.
*** 7337,7348 **** } // 64bit doesn't use the x87 void Assembler::fabs() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xE1); } void Assembler::fadd(int i) { emit_farith(0xD8, 0xC0, i); } --- 6842,6852 ---- } // 64bit doesn't use the x87 void Assembler::fabs() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xE1); } void Assembler::fadd(int i) { emit_farith(0xD8, 0xC0, i); }
*** 7366,7377 **** void Assembler::faddp(int i) { emit_farith(0xDE, 0xC0, i); } void Assembler::fchs() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xE0); } void Assembler::fcom(int i) { emit_farith(0xD8, 0xD0, i); } --- 6870,6880 ---- void Assembler::faddp(int i) { emit_farith(0xDE, 0xC0, i); } void Assembler::fchs() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xE0); } void Assembler::fcom(int i) { emit_farith(0xD8, 0xD0, i); }
*** 7391,7412 **** emit_int8((unsigned char)0xD8); emit_operand32(rbx, src); } void Assembler::fcompp() { ! emit_int8((unsigned char)0xDE); ! emit_int8((unsigned char)0xD9); } void Assembler::fcos() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xFF); } void Assembler::fdecstp() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xF6); } void Assembler::fdiv(int i) { emit_farith(0xD8, 0xF0, i); } --- 6894,6912 ---- emit_int8((unsigned char)0xD8); emit_operand32(rbx, src); } void Assembler::fcompp() { ! emit_int16((unsigned char)0xDE, (unsigned char)0xD9); } void Assembler::fcos() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xFF); } void Assembler::fdecstp() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xF6); } void Assembler::fdiv(int i) { emit_farith(0xD8, 0xF0, i); }
*** 7473,7490 **** emit_int8((unsigned char)0xDB); emit_operand32(rax, adr); } void Assembler::fincstp() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xF7); } void Assembler::finit() { ! emit_int8((unsigned char)0x9B); ! emit_int8((unsigned char)0xDB); ! emit_int8((unsigned char)0xE3); } void Assembler::fist_s(Address adr) { InstructionMark im(this); emit_int8((unsigned char)0xDB); --- 6973,6987 ---- emit_int8((unsigned char)0xDB); emit_operand32(rax, adr); } void Assembler::fincstp() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xF7); } void Assembler::finit() { ! emit_int24((unsigned char)0x9B, (unsigned char)0xDB, (unsigned char)0xE3); } void Assembler::fist_s(Address adr) { InstructionMark im(this); emit_int8((unsigned char)0xDB);
*** 7502,7513 **** emit_int8((unsigned char)0xDB); emit_operand32(rbx, adr); } void Assembler::fld1() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xE8); } void Assembler::fld_d(Address adr) { InstructionMark im(this); emit_int8((unsigned char)0xDD); --- 6999,7009 ---- emit_int8((unsigned char)0xDB); emit_operand32(rbx, adr); } void Assembler::fld1() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xE8); } void Assembler::fld_d(Address adr) { InstructionMark im(this); emit_int8((unsigned char)0xDD);
*** 7542,7563 **** emit_int8((unsigned char)0xD9); emit_operand32(rsp, src); } void Assembler::fldlg2() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xEC); } void Assembler::fldln2() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xED); } void Assembler::fldz() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xEE); } void Assembler::flog() { fldln2(); fxch(); --- 7038,7056 ---- emit_int8((unsigned char)0xD9); emit_operand32(rsp, src); } void Assembler::fldlg2() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xEC); } void Assembler::fldln2() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xED); } void Assembler::fldz() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xEE); } void Assembler::flog() { fldln2(); fxch();
*** 7600,7643 **** emit_operand32(rsi, dst); } void Assembler::fnstcw(Address src) { InstructionMark im(this); ! emit_int8((unsigned char)0x9B); ! emit_int8((unsigned char)0xD9); emit_operand32(rdi, src); } void Assembler::fnstsw_ax() { ! emit_int8((unsigned char)0xDF); ! emit_int8((unsigned char)0xE0); } void Assembler::fprem() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xF8); } void Assembler::fprem1() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xF5); } void Assembler::frstor(Address src) { InstructionMark im(this); emit_int8((unsigned char)0xDD); emit_operand32(rsp, src); } void Assembler::fsin() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xFE); } void Assembler::fsqrt() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xFA); } void Assembler::fst_d(Address adr) { InstructionMark im(this); emit_int8((unsigned char)0xDD); --- 7093,7130 ---- emit_operand32(rsi, dst); } void Assembler::fnstcw(Address src) { InstructionMark im(this); ! emit_int16((unsigned char)0x9B, (unsigned char)0xD9); emit_operand32(rdi, src); } void Assembler::fnstsw_ax() { ! emit_int16((unsigned char)0xDF, (unsigned char)0xE0); } void Assembler::fprem() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xF8); } void Assembler::fprem1() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xF5); } void Assembler::frstor(Address src) { InstructionMark im(this); emit_int8((unsigned char)0xDD); emit_operand32(rsp, src); } void Assembler::fsin() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xFE); } void Assembler::fsqrt() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xFA); } void Assembler::fst_d(Address adr) { InstructionMark im(this); emit_int8((unsigned char)0xDD);
*** 7719,7737 **** void Assembler::fsubrp(int i) { emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong) } void Assembler::ftan() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xF2); ! emit_int8((unsigned char)0xDD); ! emit_int8((unsigned char)0xD8); } void Assembler::ftst() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xE4); } void Assembler::fucomi(int i) { // make sure the instruction is supported (introduced for P6, together with cmov) guarantee(VM_Version::supports_cmov(), "illegal instruction"); --- 7206,7220 ---- void Assembler::fsubrp(int i) { emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong) } void Assembler::ftan() { ! emit_int32((unsigned char)0xD9, (unsigned char)0xF2, (unsigned char)0xDD, (unsigned char)0xD8); } void Assembler::ftst() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xE4); } void Assembler::fucomi(int i) { // make sure the instruction is supported (introduced for P6, together with cmov) guarantee(VM_Version::supports_cmov(), "illegal instruction");
*** 7751,7777 **** void Assembler::fxch(int i) { emit_farith(0xD9, 0xC8, i); } void Assembler::fyl2x() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xF1); } void Assembler::frndint() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xFC); } void Assembler::f2xm1() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xF0); } void Assembler::fldl2e() { ! emit_int8((unsigned char)0xD9); ! emit_int8((unsigned char)0xEA); } #endif // !_LP64 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding. static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 }; --- 7234,7256 ---- void Assembler::fxch(int i) { emit_farith(0xD9, 0xC8, i); } void Assembler::fyl2x() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xF1); } void Assembler::frndint() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xFC); } void Assembler::f2xm1() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xF0); } void Assembler::fldl2e() { ! emit_int16((unsigned char)0xD9, (unsigned char)0xEA); } #endif // !_LP64 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding. static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
*** 7815,7849 **** void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) { int vector_len = _attributes->get_vector_len(); bool vex_w = _attributes->is_rex_vex_w(); if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) { - prefix(VEX_3bytes); - int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0); byte1 = (~byte1) & 0xE0; byte1 |= opc; - emit_int8(byte1); int byte2 = ((~nds_enc) & 0xf) << 3; byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre; - emit_int8(byte2); - } else { - prefix(VEX_2bytes); int byte1 = vex_r ? VEX_R : 0; byte1 = (~byte1) & 0x80; byte1 |= ((~nds_enc) & 0xf) << 3; byte1 |= ((vector_len > 0 ) ? 4 : 0) | pre; ! emit_int8(byte1); } } // This is a 4 byte encoding void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, int nds_enc, VexSimdPrefix pre, VexOpcode opc){ // EVEX 0x62 prefix ! prefix(EVEX_4bytes); bool vex_w = _attributes->is_rex_vex_w(); int evex_encoding = (vex_w ? VEX_W : 0); // EVEX.b is not currently used for broadcast of single element or data rounding modes _attributes->set_evex_encoding(evex_encoding); --- 7294,7325 ---- void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) { int vector_len = _attributes->get_vector_len(); bool vex_w = _attributes->is_rex_vex_w(); if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) { int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0); byte1 = (~byte1) & 0xE0; byte1 |= opc; int byte2 = ((~nds_enc) & 0xf) << 3; byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre; + emit_int24((unsigned char)VEX_3bytes, byte1, byte2); + } else { int byte1 = vex_r ? VEX_R : 0; byte1 = (~byte1) & 0x80; byte1 |= ((~nds_enc) & 0xf) << 3; byte1 |= ((vector_len > 0 ) ? 4 : 0) | pre; ! emit_int16((unsigned char)VEX_2bytes, byte1); } } // This is a 4 byte encoding void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, int nds_enc, VexSimdPrefix pre, VexOpcode opc){ // EVEX 0x62 prefix ! // byte1 = EVEX_4bytes; ! bool vex_w = _attributes->is_rex_vex_w(); int evex_encoding = (vex_w ? VEX_W : 0); // EVEX.b is not currently used for broadcast of single element or data rounding modes _attributes->set_evex_encoding(evex_encoding);
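As a worked example of the VEX bit packing that vex_prefix now emits with a single emit_int24 (or emit_int16) call: for a 128-bit, 66-prefixed, 0F 38-map instruction with no extended registers and no second source operand, the three-byte prefix comes out as C4 E2 79. The snippet below is a self-contained sketch rather than the HotSpot sources; the field constants are spelled out as the standard VEX byte values, and the map/prefix selectors (opc, pre) are plain integers chosen to mean "0F 38" and "66".

#include <cstdio>

// Standard VEX field values, written out so the example stands alone.
enum { VEX_R = 0x80, VEX_X = 0x40, VEX_B = 0x20, VEX_W = 0x80, VEX_3bytes = 0xC4 };

int main() {
  bool vex_r = false, vex_x = false, vex_b = false, vex_w = false;
  int  opc        = 2;   // map selector for 0F 38
  int  nds_enc    = 0;   // no second source register
  int  vector_len = 0;   // 128-bit
  int  pre        = 1;   // selector for the 66 SIMD prefix

  int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
  byte1 = (~byte1) & 0xE0;             // R, X, B are stored inverted
  byte1 |= opc;

  int byte2 = ((~nds_enc) & 0xf) << 3; // vvvv is stored inverted
  byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre;

  printf("%02X %02X %02X\n", VEX_3bytes, byte1, byte2);  // prints: C4 E2 79
  return 0;
}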
*** 7852,7872 **** int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0); byte2 = (~byte2) & 0xF0; // confine opc opcode extensions in mm bits to lower two bits // of form {0F, 0F_38, 0F_3A} byte2 |= opc; - emit_int8(byte2); // P1: byte 3 as Wvvvv1pp int byte3 = ((~nds_enc) & 0xf) << 3; // p[10] is always 1 byte3 |= EVEX_F; byte3 |= (vex_w & 1) << 7; // confine pre opcode extensions in pp bits to lower two bits // of form {66, F3, F2} byte3 |= pre; - emit_int8(byte3); // P2: byte 4 as zL'Lbv'aaa // kregs are implemented in the low 3 bits as aaa int byte4 = (_attributes->is_no_reg_mask()) ? 0 : --- 7328,7346 ----
*** 7879,7893 **** byte4 |= ((_attributes->get_vector_len())& 0x3) << 5; // last is EVEX.z for zero/merge actions if (_attributes->is_no_reg_mask() == false) { byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0); } ! emit_int8(byte4); } void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) { ! bool vex_r = ((xreg_enc & 8) == 8) ? 1 : 0; bool vex_b = adr.base_needs_rex(); bool vex_x; if (adr.isxmmindex()) { vex_x = adr.xmmindex_needs_rex(); } else { --- 7353,7368 ---- byte4 |= ((_attributes->get_vector_len())& 0x3) << 5; // last is EVEX.z for zero/merge actions if (_attributes->is_no_reg_mask() == false) { byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0); } ! ! emit_int32(EVEX_4bytes, byte2, byte3, byte4); } void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) { ! bool vex_r = (xreg_enc & 8) == 8; bool vex_b = adr.base_needs_rex(); bool vex_x; if (adr.isxmmindex()) { vex_x = adr.xmmindex_needs_rex(); } else {
*** 7898,7908 **** // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction // is allowed in legacy mode and has resources which will fit in it. // Pure EVEX instructions will have is_evex_instruction set in their definition. if (!attributes->is_legacy_mode()) { ! if (UseAVX > 2 && !attributes->is_evex_instruction() && !_is_managed) { if ((attributes->get_vector_len() != AVX_512bit) && (nds_enc < 16) && (xreg_enc < 16)) { attributes->set_is_legacy_mode(); } } } --- 7373,7383 ---- // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction // is allowed in legacy mode and has resources which will fit in it. // Pure EVEX instructions will have is_evex_instruction set in their definition. if (!attributes->is_legacy_mode()) { ! if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) { if ((attributes->get_vector_len() != AVX_512bit) && (nds_enc < 16) && (xreg_enc < 16)) { attributes->set_is_legacy_mode(); } } }
*** 7913,7923 **** (!_legacy_mode_vl) || (attributes->is_legacy_mode())),"XMM register should be 0-15"); assert(((nds_enc < 16 && xreg_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15"); } ! _is_managed = false; if (UseAVX > 2 && !attributes->is_legacy_mode()) { bool evex_r = (xreg_enc >= 16); bool evex_v; // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31) --- 7388,7398 ---- (!_legacy_mode_vl) || (attributes->is_legacy_mode())),"XMM register should be 0-15"); assert(((nds_enc < 16 && xreg_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15"); } ! clear_managed(); if (UseAVX > 2 && !attributes->is_legacy_mode()) { bool evex_r = (xreg_enc >= 16); bool evex_v; // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31)
*** 7935,7955 **** vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc); } } int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) { ! bool vex_r = ((dst_enc & 8) == 8) ? 1 : 0; ! bool vex_b = ((src_enc & 8) == 8) ? 1 : 0; bool vex_x = false; set_attributes(attributes); attributes->set_current_assembler(this); // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction // is allowed in legacy mode and has resources which will fit in it. // Pure EVEX instructions will have is_evex_instruction set in their definition. if (!attributes->is_legacy_mode()) { ! if (UseAVX > 2 && !attributes->is_evex_instruction() && !_is_managed) { if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) && (dst_enc < 16) && (nds_enc < 16) && (src_enc < 16)) { attributes->set_is_legacy_mode(); } } --- 7410,7430 ---- vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc); } } int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) { ! bool vex_r = (dst_enc & 8) == 8; ! bool vex_b = (src_enc & 8) == 8; bool vex_x = false; set_attributes(attributes); attributes->set_current_assembler(this); // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction // is allowed in legacy mode and has resources which will fit in it. // Pure EVEX instructions will have is_evex_instruction set in their definition. if (!attributes->is_legacy_mode()) { ! if (UseAVX > 2 && !attributes->is_evex_instruction() && !is_managed()) { if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) && (dst_enc < 16) && (nds_enc < 16) && (src_enc < 16)) { attributes->set_is_legacy_mode(); } }
*** 7967,7977 **** (attributes->is_legacy_mode())),"XMM register should be 0-15"); // Instruction with legacy_mode true should have dst, nds and src < 15 assert(((dst_enc < 16 && nds_enc < 16 && src_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15"); } ! _is_managed = false; if (UseAVX > 2 && !attributes->is_legacy_mode()) { bool evex_r = (dst_enc >= 16); bool evex_v = (nds_enc >= 16); // can use vex_x as bank extender on rm encoding --- 7442,7452 ---- (attributes->is_legacy_mode())),"XMM register should be 0-15"); // Instruction with legacy_mode true should have dst, nds and src < 15 assert(((dst_enc < 16 && nds_enc < 16 && src_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15"); } ! clear_managed(); if (UseAVX > 2 && !attributes->is_legacy_mode()) { bool evex_r = (dst_enc >= 16); bool evex_v = (nds_enc >= 16); // can use vex_x as bank extender on rm encoding
*** 8017,8121 **** void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5F); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5D); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x5D); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len <= AVX_256bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xC2); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8((unsigned char)(0xF & cop)); } void Assembler::blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len <= AVX_256bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8((unsigned char)0x4B); - emit_int8((unsigned char)(0xC0 | encode)); int src2_enc = src2->encoding(); ! emit_int8((unsigned char)(0xF0 & src2_enc<<4)); } void Assembler::cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len <= AVX_256bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int8((unsigned char)0xC2); ! emit_int8((unsigned char)(0xC0 | encode)); ! 
emit_int8((unsigned char)(0xF & cop)); } void Assembler::blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len <= AVX_256bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); - emit_int8((unsigned char)0x4A); - emit_int8((unsigned char)(0xC0 | encode)); int src2_enc = src2->encoding(); ! emit_int8((unsigned char)(0xF0 & src2_enc<<4)); } void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! emit_int8((unsigned char)0x02); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8((unsigned char)imm8); } void Assembler::shlxl(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi2(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::shlxq(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi2(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xC0 | encode)); } #ifndef _LP64 void Assembler::incl(Register dst) { --- 7492,7580 ---- void Assembler::vmaxss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5F, (unsigned char)(0xC0 | encode)); } void Assembler::vmaxsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5F, (unsigned char)(0xC0 | encode)); } void Assembler::vminss(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! 
emit_int16(0x5D, (unsigned char)(0xC0 | encode)); } void Assembler::vminsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { assert(VM_Version::supports_avx(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); attributes.set_rex_vex_w_reverted(); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x5D, (unsigned char)(0xC0 | encode)); } void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len <= AVX_256bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int24((unsigned char)0xC2, (unsigned char)(0xC0 | encode), (unsigned char)(0xF & cop)); } void Assembler::blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len <= AVX_256bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int src2_enc = src2->encoding(); ! emit_int24((unsigned char)0x4B, (unsigned char)(0xC0 | encode), (unsigned char)(0xF0 & src2_enc << 4)); } void Assembler::cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len <= AVX_256bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes); ! emit_int24((unsigned char)0xC2, (unsigned char)(0xC0 | encode), (unsigned char)(0xF & cop)); } void Assembler::blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) { assert(VM_Version::supports_avx(), ""); assert(vector_len <= AVX_256bit, ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); int src2_enc = src2->encoding(); ! emit_int24((unsigned char)0x4A, (unsigned char)(0xC0 | encode), (unsigned char)(0xF0 & src2_enc << 4)); } void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) { assert(VM_Version::supports_avx2(), ""); InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); ! 
emit_int24((unsigned char)0x02, (unsigned char)(0xC0 | encode), (unsigned char)imm8); } void Assembler::shlxl(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi2(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xC0 | encode)); } void Assembler::shlxq(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi2(), ""); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true); int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xC0 | encode)); } #ifndef _LP64 void Assembler::incl(Register dst) {
*** 8154,8175 **** void Assembler::pusha() { // 32bit emit_int8(0x60); } void Assembler::set_byte_if_not_zero(Register dst) { ! emit_int8(0x0F); ! emit_int8((unsigned char)0x95); ! emit_int8((unsigned char)(0xE0 | dst->encoding())); } #else // LP64 void Assembler::set_byte_if_not_zero(Register dst) { int enc = prefix_and_encode(dst->encoding(), true); ! emit_int8(0x0F); ! emit_int8((unsigned char)0x95); ! emit_int8((unsigned char)(0xE0 | enc)); } // 64bit only pieces of the assembler // This should only be used by 64bit instructions that can use rip-relative // it cannot be used by instructions that want an immediate value. --- 7613,7630 ---- void Assembler::pusha() { // 32bit emit_int8(0x60); } void Assembler::set_byte_if_not_zero(Register dst) { ! emit_int24(0x0F, (unsigned char)0x95, (unsigned char)(0xE0 | dst->encoding())); } #else // LP64 void Assembler::set_byte_if_not_zero(Register dst) { int enc = prefix_and_encode(dst->encoding(), true); ! emit_int24(0x0F, (unsigned char)0x95, (unsigned char)(0xE0 | enc)); } // 64bit only pieces of the assembler // This should only be used by 64bit instructions that can use rip-relative // it cannot be used by instructions that want an immediate value.
*** 8315,8340 **** } return dst_enc << 3 | src_enc; } int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { ! if (dst_enc < 8) { ! if (src_enc < 8) { ! prefix(REX_W); ! } else { ! prefix(REX_WB); ! src_enc -= 8; } ! } else { ! if (src_enc < 8) { ! prefix(REX_WR); ! } else { ! prefix(REX_WRB); src_enc -= 8; } ! dst_enc -= 8; ! } return dst_enc << 3 | src_enc; } void Assembler::prefix(Register reg) { if (reg->encoding() >= 8) { --- 7770,7795 ---- } return dst_enc << 3 | src_enc; } int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { ! static const int8_t prefixes[] = { ! REX_W, ! REX_WB, ! REX_WR, ! REX_WRB ! }; ! int idx = 0; ! if (dst_enc >= 8) { ! idx |= 2; ! dst_enc -= 8; } ! if (src_enc >= 8) { src_enc -= 8; + idx |= 1; } ! emit_int8(prefixes[idx]); return dst_enc << 3 | src_enc; } void Assembler::prefix(Register reg) { if (reg->encoding() >= 8) {
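The rewritten prefixq_and_encode trades the nested if/else for a table lookup: bit 1 of the index carries the REX.R extension (dst sits in the ModRM reg field) and bit 0 carries REX.B (src sits in the rm field). Below is a standalone sanity check of that indexing, with the REX constants written as the standard x86-64 byte values so the snippet compiles on its own (the HotSpot Prefix enum should encode the same values, but they are not quoted from it here).

#include <cassert>
#include <cstdint>

// Standard x86-64 REX bytes: 0x48 is REX.W; +4 adds REX.R, +1 adds REX.B.
enum : int8_t { REX_W = 0x48, REX_WB = 0x49, REX_WR = 0x4C, REX_WRB = 0x4D };

// Mirrors the table-lookup form of prefixq_and_encode (register operands only).
static int8_t rex_w_prefix_for(int dst_enc, int src_enc) {
  static const int8_t prefixes[] = { REX_W, REX_WB, REX_WR, REX_WRB };
  int idx = ((dst_enc >= 8) ? 2 : 0) | ((src_enc >= 8) ? 1 : 0);
  return prefixes[idx];
}

int main() {
  assert(rex_w_prefix_for(0, 0)   == REX_W);    // rax, rax
  assert(rex_w_prefix_for(9, 3)   == REX_WR);   // r9,  rbx : needs REX.R
  assert(rex_w_prefix_for(3, 9)   == REX_WB);   // rbx, r9  : needs REX.B
  assert(rex_w_prefix_for(12, 13) == REX_WRB);  // r12, r13 : needs both
  return 0;
}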
*** 8388,8413 **** prefix(REX_X); } } } ! void Assembler::prefixq(Address adr) { if (adr.base_needs_rex()) { if (adr.index_needs_rex()) { ! prefix(REX_WXB); } else { ! prefix(REX_WB); } } else { if (adr.index_needs_rex()) { ! prefix(REX_WX); } else { ! prefix(REX_W); } } } void Assembler::prefix(Address adr, Register reg, bool byteinst) { if (reg->encoding() < 8) { if (adr.base_needs_rex()) { if (adr.index_needs_rex()) { --- 7843,7882 ---- prefix(REX_X); } } } ! int8_t Assembler::get_prefixq(Address adr) { ! static const Assembler::Prefix prefixes[] = { ! REX_W, ! REX_WX, ! REX_WB, ! REX_WXB ! }; ! int idx = (int)adr.index_needs_rex() | ((int)adr.base_needs_rex() << 1); ! Assembler::Prefix prfx = prefixes[idx]; ! #ifdef ASSERT if (adr.base_needs_rex()) { if (adr.index_needs_rex()) { ! assert(prfx == REX_WXB, "must be"); } else { ! assert(prfx == REX_WB, "must be"); } } else { if (adr.index_needs_rex()) { ! assert(prfx == REX_WX, "must be"); } else { ! assert(prfx == REX_W, "must be"); } } + #endif + return (int8_t)prfx; } + void Assembler::prefixq(Address adr) { + emit_int8(get_prefixq(adr)); + } void Assembler::prefix(Address adr, Register reg, bool byteinst) { if (reg->encoding() < 8) { if (adr.base_needs_rex()) { if (adr.index_needs_rex()) {
*** 8437,8476 **** } } } } void Assembler::prefixq(Address adr, Register src) { ! if (src->encoding() < 8) { ! if (adr.base_needs_rex()) { ! if (adr.index_needs_rex()) { ! prefix(REX_WXB); ! } else { ! prefix(REX_WB); ! } ! } else { ! if (adr.index_needs_rex()) { ! prefix(REX_WX); ! } else { ! prefix(REX_W); ! } ! } ! } else { ! if (adr.base_needs_rex()) { ! if (adr.index_needs_rex()) { ! prefix(REX_WRXB); ! } else { ! prefix(REX_WRB); ! } ! } else { ! if (adr.index_needs_rex()) { ! prefix(REX_WRX); ! } else { ! prefix(REX_WR); ! } ! } ! } } void Assembler::prefix(Address adr, XMMRegister reg) { if (reg->encoding() < 8) { if (adr.base_needs_rex()) { --- 7906,7932 ---- } } } } + int8_t Assembler::get_prefixq(Address adr, Register src) { + static const int8_t prefixes[] = { + REX_WR, + REX_WRX, + REX_WRB, + REX_WRXB, + REX_W, + REX_WX, + REX_WB, + REX_WXB, + }; + int idx = (int)adr.index_needs_rex() | ((int)adr.base_needs_rex() << 1) | ((int)(src->encoding() < 8) << 2); + return prefixes[idx]; + } + void Assembler::prefixq(Address adr, Register src) { ! emit_int8(get_prefixq(adr, src)); } void Assembler::prefix(Address adr, XMMRegister reg) { if (reg->encoding() < 8) { if (adr.base_needs_rex()) {
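The memory-operand variant get_prefixq(Address, Register) extends the same idea to a three-bit index. Note that bit 2 is set when the source register does not need REX.R (encoding below 8), which is why the REX_WR family occupies the first four table slots. A standalone check follows, with plain booleans standing in for adr.index_needs_rex() / adr.base_needs_rex() since the HotSpot Address type is not reproduced here.

#include <cassert>
#include <cstdint>

// Standard x86-64 REX bytes: 0x48 = REX.W, +1 = B, +2 = X, +4 = R.
enum : int8_t {
  REX_W  = 0x48, REX_WX  = 0x4A, REX_WB  = 0x49, REX_WXB  = 0x4B,
  REX_WR = 0x4C, REX_WRX = 0x4E, REX_WRB = 0x4D, REX_WRXB = 0x4F
};

// Mirrors get_prefixq(Address, Register); the Address accessors are replaced
// by booleans so the example is self-contained.
static int8_t get_prefixq_sketch(bool index_needs_rex, bool base_needs_rex, int src_enc) {
  static const int8_t prefixes[] = {
    REX_WR, REX_WRX, REX_WRB, REX_WRXB,   // src_enc >= 8: REX.R required
    REX_W,  REX_WX,  REX_WB,  REX_WXB,    // src_enc <  8: no REX.R
  };
  int idx = (int)index_needs_rex | ((int)base_needs_rex << 1) | ((int)(src_enc < 8) << 2);
  return prefixes[idx];
}

int main() {
  assert(get_prefixq_sketch(false, false, 0)  == REX_W);    // e.g. addq [rbx], rax
  assert(get_prefixq_sketch(false, true,  0)  == REX_WB);   // base is r8..r15
  assert(get_prefixq_sketch(true,  false, 9)  == REX_WRX);  // extended index, src = r9
  assert(get_prefixq_sketch(true,  true,  12) == REX_WRXB); // everything extended
  return 0;
}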
*** 8538,8549 **** emit_arith(0x81, 0xD0, dst, imm32); } void Assembler::adcq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x13); emit_operand(dst, src); } void Assembler::adcq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); --- 7994,8005 ---- emit_arith(0x81, 0xD0, dst, imm32); } void Assembler::adcq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), ! 0x13); emit_operand(dst, src); } void Assembler::adcq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding());
*** 8556,8579 **** emit_arith_operand(0x81, rax, dst,imm32); } void Assembler::addq(Address dst, Register src) { InstructionMark im(this); ! prefixq(dst, src); ! emit_int8(0x01); emit_operand(src, dst); } void Assembler::addq(Register dst, int32_t imm32) { (void) prefixq_and_encode(dst->encoding()); emit_arith(0x81, 0xC0, dst, imm32); } void Assembler::addq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x03); emit_operand(dst, src); } void Assembler::addq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); --- 8012,8034 ---- emit_arith_operand(0x81, rax, dst,imm32); } void Assembler::addq(Address dst, Register src) { InstructionMark im(this); ! emit_int16(get_prefixq(dst, src), ! 0x01); emit_operand(src, dst); } void Assembler::addq(Register dst, int32_t imm32) { (void) prefixq_and_encode(dst->encoding()); emit_arith(0x81, 0xC0, dst, imm32); } void Assembler::addq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), 0x03); emit_operand(dst, src); } void Assembler::addq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding());
*** 8582,8611 **** void Assembler::adcxq(Register dst, Register src) { //assert(VM_Version::supports_adx(), "adx instructions not supported"); emit_int8((unsigned char)0x66); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8(0x38); ! emit_int8((unsigned char)0xF6); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::adoxq(Register dst, Register src) { //assert(VM_Version::supports_adx(), "adx instructions not supported"); emit_int8((unsigned char)0xF3); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8(0x38); ! emit_int8((unsigned char)0xF6); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::andq(Address dst, int32_t imm32) { InstructionMark im(this); ! prefixq(dst); ! emit_int8((unsigned char)0x81); emit_operand(rsp, dst, 4); emit_int32(imm32); } void Assembler::andq(Register dst, int32_t imm32) { --- 8037,8066 ---- void Assembler::adcxq(Register dst, Register src) { //assert(VM_Version::supports_adx(), "adx instructions not supported"); emit_int8((unsigned char)0x66); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int32(0x0F, ! 0x38, ! (unsigned char)0xF6, ! (unsigned char)(0xC0 | encode)); } void Assembler::adoxq(Register dst, Register src) { //assert(VM_Version::supports_adx(), "adx instructions not supported"); emit_int8((unsigned char)0xF3); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int32(0x0F, ! 0x38, ! (unsigned char)0xF6, ! (unsigned char)(0xC0 | encode)); } void Assembler::andq(Address dst, int32_t imm32) { InstructionMark im(this); ! emit_int16(get_prefixq(dst), ! (unsigned char)0x81); emit_operand(rsp, dst, 4); emit_int32(imm32); } void Assembler::andq(Register dst, int32_t imm32) {
*** 8613,8624 **** emit_arith(0x81, 0xE0, dst, imm32); } void Assembler::andq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x23); emit_operand(dst, src); } void Assembler::andq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); --- 8068,8078 ---- emit_arith(0x81, 0xE0, dst, imm32); } void Assembler::andq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), 0x23); emit_operand(dst, src); } void Assembler::andq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding());
*** 8627,8638 **** void Assembler::andnq(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF2); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::andnq(Register dst, Register src1, Address src2) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this); --- 8081,8091 ---- void Assembler::andnq(Register dst, Register src1, Register src2) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF2, (unsigned char)(0xC0 | encode)); } void Assembler::andnq(Register dst, Register src1, Address src2) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this);
*** 8642,8675 **** emit_operand(dst, src2); } void Assembler::bsfq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBC); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::bsrq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBD); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::bswapq(Register reg) { int encode = prefixq_and_encode(reg->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)(0xC8 | encode)); } void Assembler::blsiq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF3); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::blsiq(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this); --- 8095,8122 ---- emit_operand(dst, src2); } void Assembler::bsfq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBC, (unsigned char)(0xC0 | encode)); } void Assembler::bsrq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBD, (unsigned char)(0xC0 | encode)); } void Assembler::bswapq(Register reg) { int encode = prefixq_and_encode(reg->encoding()); ! emit_int16(0x0F, (unsigned char)(0xC8 | encode)); } void Assembler::blsiq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode)); } void Assembler::blsiq(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this);
*** 8681,8692 **** void Assembler::blsmskq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF3); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::blsmskq(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this); --- 8128,8138 ---- void Assembler::blsmskq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode)); } void Assembler::blsmskq(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this);
*** 8698,8709 **** void Assembler::blsrq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF3); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::blsrq(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this); --- 8144,8154 ---- void Assembler::blsrq(Register dst, Register src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF3, (unsigned char)(0xC0 | encode)); } void Assembler::blsrq(Register dst, Address src) { assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported"); InstructionMark im(this);
*** 8712,8730 **** emit_int8((unsigned char)0xF3); emit_operand(rcx, src); } void Assembler::cdqq() { ! prefix(REX_W); ! emit_int8((unsigned char)0x99); } void Assembler::clflush(Address adr) { assert(VM_Version::supports_clflush(), "should do"); prefix(adr); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); emit_operand(rdi, adr); } void Assembler::clflushopt(Address adr) { assert(VM_Version::supports_clflushopt(), "should do!"); --- 8157,8173 ---- emit_int8((unsigned char)0xF3); emit_operand(rcx, src); } void Assembler::cdqq() { ! emit_int16(REX_W, (unsigned char)0x99); } void Assembler::clflush(Address adr) { assert(VM_Version::supports_clflush(), "should do"); prefix(adr); ! emit_int16(0x0F, (unsigned char)0xAE); emit_operand(rdi, adr); } void Assembler::clflushopt(Address adr) { assert(VM_Version::supports_clflushopt(), "should do!");
*** 8733,8745 **** assert(adr.scale() == Address::no_scale, "scale should be no_scale"); assert(adr.disp() == 0, "displacement should be 0"); // instruction prefix is 0x66 emit_int8(0x66); prefix(adr); ! // opcode family is 0x0f 0xAE ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); // extended opcode byte is 7 == rdi emit_operand(rdi, adr); } void Assembler::clwb(Address adr) { --- 8176,8187 ---- assert(adr.scale() == Address::no_scale, "scale should be no_scale"); assert(adr.disp() == 0, "displacement should be 0"); // instruction prefix is 0x66 emit_int8(0x66); prefix(adr); ! // opcode family is 0x0F 0xAE ! emit_int16(0x0F, (unsigned char)0xAE); // extended opcode byte is 7 == rdi emit_operand(rdi, adr); } void Assembler::clwb(Address adr) {
*** 8750,8784 **** assert(adr.disp() == 0, "displacement should be 0"); // instruction prefix is 0x66 emit_int8(0x66); prefix(adr); // opcode family is 0x0f 0xAE ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); // extended opcode byte is 6 == rsi emit_operand(rsi, adr); } void Assembler::cmovq(Condition cc, Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8(0x40 | cc); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cmovq(Condition cc, Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x0F); ! emit_int8(0x40 | cc); emit_operand(dst, src); } void Assembler::cmpq(Address dst, int32_t imm32) { InstructionMark im(this); ! prefixq(dst); ! emit_int8((unsigned char)0x81); emit_operand(rdi, dst, 4); emit_int32(imm32); } void Assembler::cmpq(Register dst, int32_t imm32) { --- 8192,8220 ---- assert(adr.disp() == 0, "displacement should be 0"); // instruction prefix is 0x66 emit_int8(0x66); prefix(adr); // opcode family is 0x0f 0xAE ! emit_int16(0x0F, (unsigned char)0xAE); // extended opcode byte is 6 == rsi emit_operand(rsi, adr); } void Assembler::cmovq(Condition cc, Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, 0x40 | cc, (unsigned char)(0xC0 | encode)); } void Assembler::cmovq(Condition cc, Register dst, Address src) { InstructionMark im(this); ! emit_int24(get_prefixq(src, dst), 0x0F, 0x40 | cc); emit_operand(dst, src); } void Assembler::cmpq(Address dst, int32_t imm32) { InstructionMark im(this); ! emit_int16(get_prefixq(dst), (unsigned char)0x81); emit_operand(rdi, dst, 4); emit_int32(imm32); } void Assembler::cmpq(Register dst, int32_t imm32) {
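Editor's note: from this hunk on, the rewritten Address forms pass the result of get_prefixq(...) straight into emit_int16/emit_int24, i.e. the REX prefix is now computed as a value and emitted together with the opcode instead of being written separately by prefixq(). The sketch below is a standalone illustration of how a REX.W byte is built from two register encodings; it is not the HotSpot get_prefixq implementation, which also handles address operands.

    // Standalone sketch (not HotSpot code): compute a REX.W prefix byte up front
    // so it can be handed to a multi-byte emit call.
    #include <cstdint>
    #include <cstdio>

    // reg_enc: encoding of the ModRM.reg operand, rm_enc: encoding of ModRM.rm.
    // Registers r8..r15 have encodings 8..15 and need the REX.R / REX.B bits.
    static uint8_t rex_w_prefix(int reg_enc, int rm_enc) {
      uint8_t rex = 0x48;                 // 0100 1000: REX with W=1 (64-bit operand size)
      if (reg_enc >= 8) rex |= 0x04;      // REX.R extends ModRM.reg
      if (rm_enc  >= 8) rex |= 0x01;      // REX.B extends ModRM.rm
      return rex;
    }

    int main() {
      // reg field = rdx (2), rm field = r9 (9) -> 0x49 (REX.W | REX.B)
      printf("REX prefix: 0x%02X\n", rex_w_prefix(2, 9));
      return 0;
    }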
*** 8786,8826 **** emit_arith(0x81, 0xF8, dst, imm32); } void Assembler::cmpq(Address dst, Register src) { InstructionMark im(this); ! prefixq(dst, src); ! emit_int8(0x3B); emit_operand(src, dst); } void Assembler::cmpq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); emit_arith(0x3B, 0xC0, dst, src); } void Assembler::cmpq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x3B); emit_operand(dst, src); } void Assembler::cmpxchgq(Register reg, Address adr) { InstructionMark im(this); ! prefixq(adr, reg); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB1); emit_operand(reg, adr); } void Assembler::cvtsi2sdq(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2A); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvtsi2sdq(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this); --- 8222,8257 ---- emit_arith(0x81, 0xF8, dst, imm32); } void Assembler::cmpq(Address dst, Register src) { InstructionMark im(this); ! emit_int16(get_prefixq(dst, src), 0x3B); emit_operand(src, dst); } void Assembler::cmpq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); emit_arith(0x3B, 0xC0, dst, src); } void Assembler::cmpq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), 0x3B); emit_operand(dst, src); } void Assembler::cmpxchgq(Register reg, Address adr) { InstructionMark im(this); ! emit_int24(get_prefixq(adr, reg), 0x0F, (unsigned char)0xB1); emit_operand(reg, adr); } void Assembler::cvtsi2sdq(XMMRegister dst, Register src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2A, (unsigned char)(0xC0 | encode)); } void Assembler::cvtsi2sdq(XMMRegister dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionMark im(this);
*** 8844,8994 **** void Assembler::cvttsd2siq(Register dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); // F2 REX.W 0F 2C /r // CVTTSD2SI r64, xmm1/m64 InstructionMark im(this); ! emit_int8((unsigned char)0xF2); ! prefix(REX_W); ! emit_int8(0x0F); ! emit_int8(0x2C); emit_operand(dst, src); } void Assembler::cvttsd2siq(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::cvttss2siq(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int8(0x2C); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::decl(Register dst) { // Don't use it directly. Use MacroAssembler::decrementl() instead. // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) int encode = prefix_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xFF); ! emit_int8((unsigned char)(0xC8 | encode)); } void Assembler::decq(Register dst) { // Don't use it directly. Use MacroAssembler::decrementq() instead. // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) int encode = prefixq_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xFF); ! emit_int8(0xC8 | encode); } void Assembler::decq(Address dst) { // Don't use it directly. Use MacroAssembler::decrementq() instead. InstructionMark im(this); ! prefixq(dst); ! emit_int8((unsigned char)0xFF); emit_operand(rcx, dst); } void Assembler::fxrstor(Address src) { ! prefixq(src); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); emit_operand(as_Register(1), src); } void Assembler::xrstor(Address src) { ! prefixq(src); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); emit_operand(as_Register(5), src); } void Assembler::fxsave(Address dst) { ! prefixq(dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); emit_operand(as_Register(0), dst); } void Assembler::xsave(Address dst) { ! prefixq(dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAE); emit_operand(as_Register(4), dst); } void Assembler::idivq(Register src) { int encode = prefixq_and_encode(src->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xF8 | encode)); } void Assembler::imulq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xAF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::imulq(Register dst, Register src, int value) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); if (is8bit(value)) { ! emit_int8(0x6B); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(value & 0xFF); } else { ! emit_int8(0x69); ! emit_int8((unsigned char)(0xC0 | encode)); emit_int32(value); } } void Assembler::imulq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x0F); ! emit_int8((unsigned char) 0xAF); emit_operand(dst, src); } void Assembler::incl(Register dst) { // Don't use it directly. Use MacroAssembler::incrementl() instead. 
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode) int encode = prefix_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xFF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::incq(Register dst) { // Don't use it directly. Use MacroAssembler::incrementq() instead. // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) int encode = prefixq_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xFF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::incq(Address dst) { // Don't use it directly. Use MacroAssembler::incrementq() instead. InstructionMark im(this); ! prefixq(dst); ! emit_int8((unsigned char)0xFF); emit_operand(rax, dst); } void Assembler::lea(Register dst, Address src) { leaq(dst, src); } void Assembler::leaq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8((unsigned char)0x8D); emit_operand(dst, src); } void Assembler::mov64(Register dst, int64_t imm64) { InstructionMark im(this); --- 8275,8397 ---- void Assembler::cvttsd2siq(Register dst, Address src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); // F2 REX.W 0F 2C /r // CVTTSD2SI r64, xmm1/m64 InstructionMark im(this); ! emit_int32((unsigned char)0xF2, REX_W, 0x0F, 0x2C); emit_operand(dst, src); } void Assembler::cvttsd2siq(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2C, (unsigned char)(0xC0 | encode)); } void Assembler::cvttss2siq(Register dst, XMMRegister src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes); ! emit_int16(0x2C, (unsigned char)(0xC0 | encode)); } void Assembler::decl(Register dst) { // Don't use it directly. Use MacroAssembler::decrementl() instead. // Use two-byte form (one-byte form is a REX prefix in 64-bit mode) int encode = prefix_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xFF, (unsigned char)(0xC8 | encode)); } void Assembler::decq(Register dst) { // Don't use it directly. Use MacroAssembler::decrementq() instead. // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) int encode = prefixq_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xFF, 0xC8 | encode); } void Assembler::decq(Address dst) { // Don't use it directly. Use MacroAssembler::decrementq() instead. InstructionMark im(this); ! emit_int16(get_prefixq(dst), (unsigned char)0xFF); emit_operand(rcx, dst); } void Assembler::fxrstor(Address src) { ! emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE); emit_operand(as_Register(1), src); } void Assembler::xrstor(Address src) { ! emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE); emit_operand(as_Register(5), src); } void Assembler::fxsave(Address dst) { ! emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE); emit_operand(as_Register(0), dst); } void Assembler::xsave(Address dst) { ! emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE); emit_operand(as_Register(4), dst); } void Assembler::idivq(Register src) { int encode = prefixq_and_encode(src->encoding()); ! 
emit_int16((unsigned char)0xF7, (unsigned char)(0xF8 | encode)); } void Assembler::imulq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xAF, (unsigned char)(0xC0 | encode)); } void Assembler::imulq(Register dst, Register src, int value) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); if (is8bit(value)) { ! emit_int24(0x6B, (unsigned char)(0xC0 | encode), value & 0xFF); } else { ! emit_int16(0x69, (unsigned char)(0xC0 | encode)); emit_int32(value); } } void Assembler::imulq(Register dst, Address src) { InstructionMark im(this); ! emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xAF); emit_operand(dst, src); } void Assembler::incl(Register dst) { // Don't use it directly. Use MacroAssembler::incrementl() instead. // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) int encode = prefix_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xFF, (unsigned char)(0xC0 | encode)); } void Assembler::incq(Register dst) { // Don't use it directly. Use MacroAssembler::incrementq() instead. // Use two-byte form (one-byte from is a REX prefix in 64-bit mode) int encode = prefixq_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xFF, (unsigned char)(0xC0 | encode)); } void Assembler::incq(Address dst) { // Don't use it directly. Use MacroAssembler::incrementq() instead. InstructionMark im(this); ! emit_int16(get_prefixq(dst), (unsigned char)0xFF); emit_operand(rax, dst); } void Assembler::lea(Register dst, Address src) { leaq(dst, src); } void Assembler::leaq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), (unsigned char)0x8D); emit_operand(dst, src); } void Assembler::mov64(Register dst, int64_t imm64) { InstructionMark im(this);
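Editor's note: this hunk uses emit_int32 in two different ways -- cvttsd2siq passes four individual prefix/opcode bytes (0xF2, REX_W, 0x0F, 0x2C), while the immediate forms elsewhere still pass a single 32-bit value. The toy below (not the HotSpot code) has both overloads to make the distinction concrete.

    // Standalone sketch: emit_int32(imm32) for one little-endian immediate vs.
    // emit_int32(b1, b2, b3, b4) for four instruction bytes in the order given.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    struct ToyEmitter {
      std::vector<uint8_t> code;
      void emit_int8(int b) { code.push_back(uint8_t(b)); }
      // One 32-bit immediate, stored little-endian (assumes a little-endian host).
      void emit_int32(int32_t imm) {
        uint8_t raw[4];
        memcpy(raw, &imm, sizeof(raw));
        code.insert(code.end(), raw, raw + 4);
      }
      // Four individual prefix/opcode bytes, emitted in the order given.
      void emit_int32(int b1, int b2, int b3, int b4) {
        emit_int8(b1); emit_int8(b2); emit_int8(b3); emit_int8(b4);
      }
    };

    int main() {
      ToyEmitter e;
      e.emit_int32(0xF2, 0x48, 0x0F, 0x2C);   // prefix/opcode bytes, in order
      e.emit_int32(int32_t(0x11223344));      // immediate, little-endian
      for (uint8_t b : e.code) printf("%02X ", b);
      printf("\n");
      return 0;
    }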
*** 9020,9031 **** } void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) { InstructionMark im(this); int encode = prefix_and_encode(src1->encoding()); ! emit_int8((unsigned char)0x81); ! emit_int8((unsigned char)(0xF8 | encode)); emit_data((int)imm32, rspec, narrow_oop_operand); } void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) { InstructionMark im(this); --- 8423,8433 ---- } void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) { InstructionMark im(this); int encode = prefix_and_encode(src1->encoding()); ! emit_int16((unsigned char)0x81, (unsigned char)(0xF8 | encode)); emit_data((int)imm32, rspec, narrow_oop_operand); } void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) { InstructionMark im(this);
*** 9037,9103 **** void Assembler::lzcntq(Register dst, Register src) { assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); emit_int8((unsigned char)0xF3); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBD); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movdq(XMMRegister dst, Register src) { // table D-1 says MMX/SSE2 NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x6E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movdq(Register dst, XMMRegister src) { // table D-1 says MMX/SSE2 NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); // swap src/dst to get correct prefix int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int8(0x7E); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8((unsigned char)0x8B); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8((unsigned char)0x8B); emit_operand(dst, src); } void Assembler::movq(Address dst, Register src) { InstructionMark im(this); ! prefixq(dst, src); ! emit_int8((unsigned char)0x89); emit_operand(src, dst); } void Assembler::movsbq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBE); emit_operand(dst, src); } void Assembler::movsbq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBE); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movslq(Register dst, int32_t imm32) { // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx) // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx) --- 8439,8494 ---- void Assembler::lzcntq(Register dst, Register src) { assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR"); emit_int8((unsigned char)0xF3); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBD, (unsigned char)(0xC0 | encode)); } void Assembler::movdq(XMMRegister dst, Register src) { // table D-1 says MMX/SSE2 NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! emit_int16(0x6E, (unsigned char)(0xC0 | encode)); } void Assembler::movdq(Register dst, XMMRegister src) { // table D-1 says MMX/SSE2 NOT_LP64(assert(VM_Version::supports_sse2(), "")); InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false); // swap src/dst to get correct prefix int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes); ! 
emit_int16(0x7E, (unsigned char)(0xC0 | encode)); } void Assembler::movq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int16((unsigned char)0x8B, (unsigned char)(0xC0 | encode)); } void Assembler::movq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), (unsigned char)0x8B); emit_operand(dst, src); } void Assembler::movq(Address dst, Register src) { InstructionMark im(this); ! emit_int16(get_prefixq(dst, src), (unsigned char)0x89); emit_operand(src, dst); } void Assembler::movsbq(Register dst, Address src) { InstructionMark im(this); ! emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xBE); emit_operand(dst, src); } void Assembler::movsbq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xBE, (unsigned char)(0xC0 | encode)); } void Assembler::movslq(Register dst, int32_t imm32) { // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx) // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
*** 9110,9240 **** } void Assembler::movslq(Address dst, int32_t imm32) { assert(is_simm32(imm32), "lost bits"); InstructionMark im(this); ! prefixq(dst); ! emit_int8((unsigned char)0xC7); emit_operand(rax, dst, 4); emit_int32(imm32); } void Assembler::movslq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x63); emit_operand(dst, src); } void Assembler::movslq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x63); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movswq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xBF); emit_operand(dst, src); } void Assembler::movswq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0xBF); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::movzbq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0xB6); emit_operand(dst, src); } void Assembler::movzbq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xB6); ! emit_int8(0xC0 | encode); } void Assembler::movzwq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0xB7); emit_operand(dst, src); } void Assembler::movzwq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0xB7); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::mulq(Address src) { InstructionMark im(this); ! prefixq(src); ! emit_int8((unsigned char)0xF7); emit_operand(rsp, src); } void Assembler::mulq(Register src) { int encode = prefixq_and_encode(src->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xE0 | encode)); } void Assembler::mulxq(Register dst1, Register dst2, Register src) { assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes); ! emit_int8((unsigned char)0xF6); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::negq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xD8 | encode)); } void Assembler::notq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xD0 | encode)); } void Assembler::btsq(Address dst, int imm8) { assert(isByte(imm8), "not a byte"); InstructionMark im(this); ! prefixq(dst); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0xBA); emit_operand(rbp /* 5 */, dst, 1); emit_int8(imm8); } void Assembler::btrq(Address dst, int imm8) { assert(isByte(imm8), "not a byte"); InstructionMark im(this); ! prefixq(dst); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0xBA); emit_operand(rsi /* 6 */, dst, 1); emit_int8(imm8); } void Assembler::orq(Address dst, int32_t imm32) { InstructionMark im(this); ! prefixq(dst); ! 
emit_int8((unsigned char)0x81); emit_operand(rcx, dst, 4); emit_int32(imm32); } void Assembler::orq(Register dst, int32_t imm32) { --- 8501,8606 ---- } void Assembler::movslq(Address dst, int32_t imm32) { assert(is_simm32(imm32), "lost bits"); InstructionMark im(this); ! emit_int16(get_prefixq(dst), (unsigned char)0xC7); emit_operand(rax, dst, 4); emit_int32(imm32); } void Assembler::movslq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), 0x63); emit_operand(dst, src); } void Assembler::movslq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int16(0x63, (unsigned char)(0xC0 | encode)); } void Assembler::movswq(Register dst, Address src) { InstructionMark im(this); ! emit_int24(get_prefixq(src, dst), 0x0F, (unsigned char)0xBF); emit_operand(dst, src); } void Assembler::movswq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24((unsigned char)0x0F, (unsigned char)0xBF, (unsigned char)(0xC0 | encode)); } void Assembler::movzbq(Register dst, Address src) { InstructionMark im(this); ! emit_int24(get_prefixq(src, dst), (unsigned char)0x0F, (unsigned char)0xB6); emit_operand(dst, src); } void Assembler::movzbq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24(0x0F, (unsigned char)0xB6, 0xC0 | encode); } void Assembler::movzwq(Register dst, Address src) { InstructionMark im(this); ! emit_int24(get_prefixq(src, dst), (unsigned char)0x0F, (unsigned char)0xB7); emit_operand(dst, src); } void Assembler::movzwq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24((unsigned char)0x0F, (unsigned char)0xB7, (unsigned char)(0xC0 | encode)); } void Assembler::mulq(Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src), (unsigned char)0xF7); emit_operand(rsp, src); } void Assembler::mulq(Register src) { int encode = prefixq_and_encode(src->encoding()); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xE0 | encode)); } void Assembler::mulxq(Register dst1, Register dst2, Register src) { assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes); ! emit_int16((unsigned char)0xF6, (unsigned char)(0xC0 | encode)); } void Assembler::negq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xD8 | encode)); } void Assembler::notq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xD0 | encode)); } void Assembler::btsq(Address dst, int imm8) { assert(isByte(imm8), "not a byte"); InstructionMark im(this); ! emit_int24(get_prefixq(dst), (unsigned char)0x0F, (unsigned char)0xBA); emit_operand(rbp /* 5 */, dst, 1); emit_int8(imm8); } void Assembler::btrq(Address dst, int imm8) { assert(isByte(imm8), "not a byte"); InstructionMark im(this); ! emit_int24(get_prefixq(dst), (unsigned char)0x0F, (unsigned char)0xBA); emit_operand(rsi /* 6 */, dst, 1); emit_int8(imm8); } void Assembler::orq(Address dst, int32_t imm32) { InstructionMark im(this); ! 
emit_int16(get_prefixq(dst), (unsigned char)0x81); emit_operand(rcx, dst, 4); emit_int32(imm32); } void Assembler::orq(Register dst, int32_t imm32) {
*** 9242,9253 **** emit_arith(0x81, 0xC8, dst, imm32); } void Assembler::orq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x0B); emit_operand(dst, src); } void Assembler::orq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); --- 8608,8618 ---- emit_arith(0x81, 0xC8, dst, imm32); } void Assembler::orq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), 0x0B); emit_operand(dst, src); } void Assembler::orq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding());
*** 9255,9284 **** } void Assembler::popcntq(Register dst, Address src) { assert(VM_Version::supports_popcnt(), "must support"); InstructionMark im(this); ! emit_int8((unsigned char)0xF3); ! prefixq(src, dst); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0xB8); emit_operand(dst, src); } void Assembler::popcntq(Register dst, Register src) { assert(VM_Version::supports_popcnt(), "must support"); emit_int8((unsigned char)0xF3); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8((unsigned char)0x0F); ! emit_int8((unsigned char)0xB8); ! emit_int8((unsigned char)(0xC0 | encode)); } void Assembler::popq(Address dst) { InstructionMark im(this); ! prefixq(dst); ! emit_int8((unsigned char)0x8F); emit_operand(rax, dst); } // Precomputable: popa, pusha, vzeroupper --- 8620,8643 ---- } void Assembler::popcntq(Register dst, Address src) { assert(VM_Version::supports_popcnt(), "must support"); InstructionMark im(this); ! emit_int32((unsigned char)0xF3, get_prefixq(src, dst), (unsigned char)0x0F, (unsigned char)0xB8); emit_operand(dst, src); } void Assembler::popcntq(Register dst, Register src) { assert(VM_Version::supports_popcnt(), "must support"); emit_int8((unsigned char)0xF3); int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int24((unsigned char)0x0F, (unsigned char)0xB8, (unsigned char)(0xC0 | encode)); } void Assembler::popq(Address dst) { InstructionMark im(this); ! emit_int16(get_prefixq(dst), (unsigned char)0x8F); emit_operand(rax, dst); } // Precomputable: popa, pusha, vzeroupper
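Editor's note: popcntq keeps the required byte order -- the mandatory 0xF3 prefix first, then the REX.W prefix (now returned by get_prefixq), then the 0x0F 0xB8 opcode; legacy prefixes must precede REX on x86-64. Below is a standalone check of the expected bytes for a register-register popcnt (illustrative, not patch code).

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t modrm = 0xC0 | (0 << 3) | 3;        // reg = rax(0), rm = rbx(3)
      const uint8_t bytes[] = { 0xF3, 0x48, 0x0F, 0xB8, modrm };
      for (uint8_t b : bytes) printf("%02X ", b);       // F3 48 0F B8 C3
      printf("\n");
      return 0;
    }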
*** 9345,9354 **** --- 8704,8714 ---- address end = code_section->end(); memcpy(end, src, src_len); code_section->set_end(end + src_len); } + void Assembler::popa() { // 64bit emit_copy(code_section(), popa_code, popa_len); } void Assembler::popa_uncached() { // 64bit
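Editor's note: as the hunk above shows, popa()/pusha()/vzeroupper() bodies are emitted by copying a precomputed byte sequence into the code section (emit_copy uses memcpy plus set_end) rather than re-encoding each instruction on every call. The standalone toy below mirrors that pattern with a plain byte vector; the three-byte blob is just an illustrative push rax/rbx/rcx, not the real popa sequence.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    static const uint8_t kPrecomputed[] = { 0x50, 0x53, 0x51 };   // push rax, push rbx, push rcx

    static void emit_copy(std::vector<uint8_t>& code, const uint8_t* src, size_t len) {
      size_t end = code.size();
      code.resize(end + len);
      memcpy(code.data() + end, src, len);   // bulk copy, no per-instruction encoding
    }

    int main() {
      std::vector<uint8_t> code;
      emit_copy(code, kPrecomputed, sizeof(kPrecomputed));
      for (uint8_t b : code) printf("%02X ", b);
      printf("\n");
      return 0;
    }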
*** 9405,9493 **** emit_copy(code_section(), vzup_code, vzup_len); } void Assembler::pushq(Address src) { InstructionMark im(this); ! prefixq(src); ! emit_int8((unsigned char)0xFF); emit_operand(rsi, src); } void Assembler::rclq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xD0 | encode)); } else { ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)(0xD0 | encode)); ! emit_int8(imm8); } } void Assembler::rcrq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xD8 | encode)); } else { ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)(0xD8 | encode)); ! emit_int8(imm8); } } void Assembler::rorq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xC8 | encode)); } else { ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)(0xc8 | encode)); ! emit_int8(imm8); } } void Assembler::rorxq(Register dst, Register src, int imm8) { assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes); ! emit_int8((unsigned char)0xF0); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::rorxd(Register dst, Register src, int imm8) { assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes); ! emit_int8((unsigned char)0xF0); ! emit_int8((unsigned char)(0xC0 | encode)); ! emit_int8(imm8); } void Assembler::sarq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xF8 | encode)); } else { ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)(0xF8 | encode)); ! emit_int8(imm8); } } void Assembler::sarq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xD3); ! emit_int8((unsigned char)(0xF8 | encode)); } void Assembler::sbbq(Address dst, int32_t imm32) { InstructionMark im(this); prefixq(dst); --- 8765,8835 ---- emit_copy(code_section(), vzup_code, vzup_len); } void Assembler::pushq(Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src), (unsigned char)0xFF); emit_operand(rsi, src); } void Assembler::rclq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int16((unsigned char)0xD1, (unsigned char)(0xD0 | encode)); } else { ! 
emit_int24((unsigned char)0xC1, (unsigned char)(0xD0 | encode), imm8); } } void Assembler::rcrq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int16((unsigned char)0xD1, (unsigned char)(0xD8 | encode)); } else { ! emit_int24((unsigned char)0xC1, (unsigned char)(0xD8 | encode), imm8); } } void Assembler::rorq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int16((unsigned char)0xD1, (unsigned char)(0xC8 | encode)); } else { ! emit_int24((unsigned char)0xC1, (unsigned char)(0xc8 | encode), imm8); } } void Assembler::rorxq(Register dst, Register src, int imm8) { assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes); ! emit_int24((unsigned char)0xF0, (unsigned char)(0xC0 | encode), imm8); } void Assembler::rorxd(Register dst, Register src, int imm8) { assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported"); InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false); int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes); ! emit_int24((unsigned char)0xF0, (unsigned char)(0xC0 | encode), imm8); } void Assembler::sarq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int16((unsigned char)0xD1, (unsigned char)(0xF8 | encode)); } else { ! emit_int24((unsigned char)0xC1, (unsigned char)(0xF8 | encode), imm8); } } void Assembler::sarq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xD3, (unsigned char)(0xF8 | encode)); } void Assembler::sbbq(Address dst, int32_t imm32) { InstructionMark im(this); prefixq(dst);
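Editor's note: most of the rotate/shift-by-immediate forms above (and shlq/sarq further down) pick opcode 0xD1 when the count is 1, which needs no immediate byte, and fall back to 0xC1 followed by the count otherwise -- saving one byte in the common shift-by-1 case. A standalone toy encoder for 64-bit shifts shows the same choice (not patch code).

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // subcode: ModRM.reg extension selecting the operation (4 = SHL, 5 = SHR, 7 = SAR).
    static std::vector<uint8_t> encode_shift_q(int subcode, int reg_enc, int imm8) {
      std::vector<uint8_t> out;
      out.push_back(0x48);                              // REX.W (low registers only)
      if (imm8 == 1) {
        out.push_back(0xD1);                            // shift r/m64 by 1
        out.push_back(0xC0 | (subcode << 3) | reg_enc);
      } else {
        out.push_back(0xC1);                            // shift r/m64 by imm8
        out.push_back(0xC0 | (subcode << 3) | reg_enc);
        out.push_back(uint8_t(imm8));
      }
      return out;
    }

    int main() {
      for (uint8_t b : encode_shift_q(4, 0, 1)) printf("%02X ", b);   // shl rax, 1 -> 48 D1 E0
      printf("\n");
      for (uint8_t b : encode_shift_q(4, 0, 3)) printf("%02X ", b);   // shl rax, 3 -> 48 C1 E0 03
      printf("\n");
      return 0;
    }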
*** 9499,9510 **** emit_arith(0x81, 0xD8, dst, imm32); } void Assembler::sbbq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x1B); emit_operand(dst, src); } void Assembler::sbbq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); --- 8841,8851 ---- emit_arith(0x81, 0xD8, dst, imm32); } void Assembler::sbbq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), 0x1B); emit_operand(dst, src); } void Assembler::sbbq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding());
*** 9513,9561 **** void Assembler::shlq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int8((unsigned char)0xD1); ! emit_int8((unsigned char)(0xE0 | encode)); } else { ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)(0xE0 | encode)); ! emit_int8(imm8); } } void Assembler::shlq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xD3); ! emit_int8((unsigned char)(0xE0 | encode)); } void Assembler::shrq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xC1); ! emit_int8((unsigned char)(0xE8 | encode)); ! emit_int8(imm8); } void Assembler::shrq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int8((unsigned char)0xD3); ! emit_int8(0xE8 | encode); } void Assembler::subq(Address dst, int32_t imm32) { InstructionMark im(this); prefixq(dst); emit_arith_operand(0x81, rbp, dst, imm32); } void Assembler::subq(Address dst, Register src) { InstructionMark im(this); ! prefixq(dst, src); ! emit_int8(0x29); emit_operand(src, dst); } void Assembler::subq(Register dst, int32_t imm32) { (void) prefixq_and_encode(dst->encoding()); --- 8854,8894 ---- void Assembler::shlq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); if (imm8 == 1) { ! emit_int16((unsigned char)0xD1, (unsigned char)(0xE0 | encode)); } else { ! emit_int24((unsigned char)0xC1, (unsigned char)(0xE0 | encode), imm8); } } void Assembler::shlq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xD3, (unsigned char)(0xE0 | encode)); } void Assembler::shrq(Register dst, int imm8) { assert(isShiftCount(imm8 >> 1), "illegal shift count"); int encode = prefixq_and_encode(dst->encoding()); ! emit_int24((unsigned char)0xC1, (unsigned char)(0xE8 | encode), imm8); } void Assembler::shrq(Register dst) { int encode = prefixq_and_encode(dst->encoding()); ! emit_int16((unsigned char)0xD3, 0xE8 | encode); } void Assembler::subq(Address dst, int32_t imm32) { InstructionMark im(this); prefixq(dst); emit_arith_operand(0x81, rbp, dst, imm32); } void Assembler::subq(Address dst, Register src) { InstructionMark im(this); ! emit_int16(get_prefixq(dst, src), 0x29); emit_operand(src, dst); } void Assembler::subq(Register dst, int32_t imm32) { (void) prefixq_and_encode(dst->encoding());
*** 9568,9579 **** emit_arith_imm32(0x81, 0xE8, dst, imm32); } void Assembler::subq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x2B); emit_operand(dst, src); } void Assembler::subq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); --- 8901,8911 ---- emit_arith_imm32(0x81, 0xE8, dst, imm32); } void Assembler::subq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), 0x2B); emit_operand(dst, src); } void Assembler::subq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding());
*** 9584,9599 **** // not using emit_arith because test // doesn't support sign-extension of // 8bit operands int encode = dst->encoding(); if (encode == 0) { ! prefix(REX_W); ! emit_int8((unsigned char)0xA9); } else { encode = prefixq_and_encode(encode); ! emit_int8((unsigned char)0xF7); ! emit_int8((unsigned char)(0xC0 | encode)); } emit_int32(imm32); } void Assembler::testq(Register dst, Register src) { --- 8916,8929 ---- // not using emit_arith because test // doesn't support sign-extension of // 8bit operands int encode = dst->encoding(); if (encode == 0) { ! emit_int16(REX_W, (unsigned char)0xA9); } else { encode = prefixq_and_encode(encode); ! emit_int16((unsigned char)0xF7, (unsigned char)(0xC0 | encode)); } emit_int32(imm32); } void Assembler::testq(Register dst, Register src) {
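Editor's note: testq(Register, imm32) keeps its special case -- when the destination encodes to 0 (RAX) it uses the short TEST RAX, imm32 form (REX.W A9 id) with no ModRM byte, otherwise the generic F7 /0 form. A standalone toy for low registers (not patch code):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static std::vector<uint8_t> encode_testq_imm32(int reg_enc) {
      std::vector<uint8_t> out;
      out.push_back(0x48);                         // REX.W (low registers only)
      if (reg_enc == 0) {
        out.push_back(0xA9);                       // TEST RAX, imm32 (short form)
      } else {
        out.push_back(0xF7);                       // TEST r/m64, imm32
        out.push_back(0xC0 | (0 << 3) | reg_enc);  // ModRM with /0
      }
      // ...followed by the 4 little-endian immediate bytes in both cases
      return out;
    }

    int main() {
      for (uint8_t b : encode_testq_imm32(0)) printf("%02X ", b);   // rax -> 48 A9
      printf("\n");
      for (uint8_t b : encode_testq_imm32(3)) printf("%02X ", b);   // rbx -> 48 F7 C3
      printf("\n");
      return 0;
    }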
*** 9601,9644 **** emit_arith(0x85, 0xC0, dst, src); } void Assembler::testq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8((unsigned char)0x85); emit_operand(dst, src); } void Assembler::xaddq(Address dst, Register src) { InstructionMark im(this); ! prefixq(dst, src); ! emit_int8(0x0F); ! emit_int8((unsigned char)0xC1); emit_operand(src, dst); } void Assembler::xchgq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8((unsigned char)0x87); emit_operand(dst, src); } void Assembler::xchgq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int8((unsigned char)0x87); ! emit_int8((unsigned char)(0xc0 | encode)); } void Assembler::xorq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); emit_arith(0x33, 0xC0, dst, src); } void Assembler::xorq(Register dst, Address src) { InstructionMark im(this); ! prefixq(src, dst); ! emit_int8(0x33); emit_operand(dst, src); } #endif // !LP64 --- 8931,8968 ---- emit_arith(0x85, 0xC0, dst, src); } void Assembler::testq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), (unsigned char)0x85); emit_operand(dst, src); } void Assembler::xaddq(Address dst, Register src) { InstructionMark im(this); ! emit_int24(get_prefixq(dst, src), 0x0F, (unsigned char)0xC1); emit_operand(src, dst); } void Assembler::xchgq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), (unsigned char)0x87); emit_operand(dst, src); } void Assembler::xchgq(Register dst, Register src) { int encode = prefixq_and_encode(dst->encoding(), src->encoding()); ! emit_int16((unsigned char)0x87, (unsigned char)(0xc0 | encode)); } void Assembler::xorq(Register dst, Register src) { (void) prefixq_and_encode(dst->encoding(), src->encoding()); emit_arith(0x33, 0xC0, dst, src); } void Assembler::xorq(Register dst, Address src) { InstructionMark im(this); ! emit_int16(get_prefixq(src, dst), 0x33); emit_operand(dst, src); } #endif // !LP64