1330
// 32-bit AND — three addressing forms.
// AND r32, imm32: arithmetic-group opcode 0x81, /4 extension (ModRM base 0xE0).
1331 void Assembler::andl(Register dst, int32_t imm32) {
1332 prefix(dst);
1333 emit_arith(0x81, 0xE0, dst, imm32);
1334 }
1335
// AND r32, r/m32 (memory form): opcode 0x23.
1336 void Assembler::andl(Register dst, Address src) {
1337 InstructionMark im(this);
1338 prefix(src, dst);
1339 emit_int8(0x23);
1340 emit_operand(dst, src);
1341 }
1342
// AND r32, r32: opcode 0x23 with register-direct ModRM (base 0xC0).
1343 void Assembler::andl(Register dst, Register src) {
1344 (void) prefix_and_encode(dst->encoding(), src->encoding());
1345 emit_arith(0x23, 0xC0, dst, src);
1346 }
1347
// ANDN (BMI1): dst = ~src1 & src2, VEX-encoded 0F38 opcode 0xF2.
1348 void Assembler::andnl(Register dst, Register src1, Register src2) {
1349 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1350 int encode = vex_prefix_0F38_and_encode(dst, src1, src2, false);
1351 emit_int8((unsigned char)0xF2);
1352 emit_int8((unsigned char)(0xC0 | encode));
1353 }
1354
// ANDN memory-source form: dst = ~src1 & [src2].
1355 void Assembler::andnl(Register dst, Register src1, Address src2) {
1356 InstructionMark im(this);
1357 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1358 vex_prefix_0F38(dst, src1, src2, false);
1359 emit_int8((unsigned char)0xF2);
1360 emit_operand(dst, src2);
1361 }
1362
// BSF r32, r32: bit scan forward (index of lowest set bit), 0F BC.
1363 void Assembler::bsfl(Register dst, Register src) {
1364 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1365 emit_int8(0x0F);
1366 emit_int8((unsigned char)0xBC);
1367 emit_int8((unsigned char)(0xC0 | encode));
1368 }
1369
// BSR r32, r32: bit scan reverse (index of highest set bit), 0F BD.
1370 void Assembler::bsrl(Register dst, Register src) {
1371 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1372 emit_int8(0x0F);
1373 emit_int8((unsigned char)0xBD);
1374 emit_int8((unsigned char)(0xC0 | encode));
1375 }
1376
// BSWAP r32: byte-swap register, 0F C8+r (register number folded into the opcode byte).
1377 void Assembler::bswapl(Register reg) { // bswap
1378 int encode = prefix_and_encode(reg->encoding());
1379 emit_int8(0x0F);
1380 emit_int8((unsigned char)(0xC8 | encode));
1381 }
1382
// BMI1 BLSx group (32-bit): VEX 0F38 opcode 0xF3 with the opcode extension carried
// in the ModRM reg field via a fixed register operand (rbx => /3 BLSI, rdx => /2 BLSMSK,
// rcx => /1 BLSR). dst travels in VEX.vvvv.
// BLSI: extract lowest set isolated bit.
1383 void Assembler::blsil(Register dst, Register src) {
1384 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1385 int encode = vex_prefix_0F38_and_encode(rbx, dst, src, false);
1386 emit_int8((unsigned char)0xF3);
1387 emit_int8((unsigned char)(0xC0 | encode));
1388 }
1389
1390 void Assembler::blsil(Register dst, Address src) {
1391 InstructionMark im(this);
1392 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1393 vex_prefix_0F38(rbx, dst, src, false);
1394 emit_int8((unsigned char)0xF3);
1395 emit_operand(rbx, src);
1396 }
1397
// BLSMSK: mask up to (and including) lowest set bit.
1398 void Assembler::blsmskl(Register dst, Register src) {
1399 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1400 int encode = vex_prefix_0F38_and_encode(rdx, dst, src, false);
1401 emit_int8((unsigned char)0xF3);
1402 emit_int8((unsigned char)(0xC0 | encode));
1403 }
1404
1405 void Assembler::blsmskl(Register dst, Address src) {
1406 InstructionMark im(this);
1407 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1408 vex_prefix_0F38(rdx, dst, src, false);
1409 emit_int8((unsigned char)0xF3);
1410 emit_operand(rdx, src);
1411 }
1412
// BLSR: reset lowest set bit.
1413 void Assembler::blsrl(Register dst, Register src) {
1414 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1415 int encode = vex_prefix_0F38_and_encode(rcx, dst, src, false);
1416 emit_int8((unsigned char)0xF3);
1417 emit_int8((unsigned char)(0xC0 | encode));
1418 }
1419
1420 void Assembler::blsrl(Register dst, Address src) {
1421 InstructionMark im(this);
1422 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1423 vex_prefix_0F38(rcx, dst, src, false);
1424 emit_int8((unsigned char)0xF3);
1425 emit_operand(rcx, src);
1426 }
1427
1428 void Assembler::call(Label& L, relocInfo::relocType rtype) {
1429 // suspect disp32 is always good
1430 int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
1431
1432 if (L.is_bound()) {
1433 const int long_size = 5;
1434 int offs = (int)( target(L) - pc() );
1435 assert(offs <= 0, "assembler error");
1436 InstructionMark im(this);
1437 // 1110 1000 #32-bit disp
1438 emit_int8((unsigned char)0xE8);
1439 emit_data(offs - long_size, rtype, operand);
1440 } else {
1441 InstructionMark im(this);
1442 // 1110 1000 #32-bit disp
1443 L.add_patch_at(code(), locator());
3097 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3098 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
3099 emit_int8(0x73);
3100 emit_int8((unsigned char)(0xC0 | encode));
3101 emit_int8(shift);
3102 }
3103
// PSLLDQ: byte-wise shift-left of the full 128-bit xmm value; 66 0F 73 with the
// /7 extension supplied by the fixed xmm7 operand, followed by the imm8 shift count.
3104 void Assembler::pslldq(XMMRegister dst, int shift) {
3105 // Shift left 128 bit value in xmm register by number of bytes.
3106 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3107 int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
3108 emit_int8(0x73);
3109 emit_int8((unsigned char)(0xC0 | encode));
3110 emit_int8(shift);
3111 }
3112
// PTEST (SSE4.1): logical compare setting ZF/CF, 66 0F38 0x17. Memory form first.
3113 void Assembler::ptest(XMMRegister dst, Address src) {
3114 assert(VM_Version::supports_sse4_1(), "");
3115 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
3116 InstructionMark im(this);
3117 simd_prefix(dst, src, VEX_SIMD_66, false, VEX_OPCODE_0F_38);
3118 emit_int8(0x17);
3119 emit_operand(dst, src);
3120 }
3121
// PTEST register-register form.
3122 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
3123 assert(VM_Version::supports_sse4_1(), "");
3124 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
3125 false, VEX_OPCODE_0F_38);
3126 emit_int8(0x17);
3127 emit_int8((unsigned char)(0xC0 | encode));
3128 }
3129
// VPTEST (AVX, 256-bit): logical compare, VEX 66 0F38 0x17. Memory form first.
3130 void Assembler::vptest(XMMRegister dst, Address src) {
3131 assert(VM_Version::supports_avx(), "");
3132 InstructionMark im(this);
3133 int vector_len = AVX_256bit;
3134 assert(dst != xnoreg, "sanity");
3135 int dst_enc = dst->encoding();
3136 // swap src<->dst for encoding
3137 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
3138 emit_int8(0x17);
3139 emit_operand(dst, src);
3140 }
3141
// VPTEST register-register form.
3142 void Assembler::vptest(XMMRegister dst, XMMRegister src) {
3143 assert(VM_Version::supports_avx(), "");
3144 int vector_len = AVX_256bit;
3145 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
3146 vector_len, VEX_OPCODE_0F_38);
3147 emit_int8(0x17);
3148 emit_int8((unsigned char)(0xC0 | encode));
3149 }
3150
// PUNPCKLBW: interleave low bytes, 66 0F 0x60. EVEX tuple metadata is set only
// when an EVEX-capable CPU may encode this via the EVEX path.
3151 void Assembler::punpcklbw(XMMRegister dst, Address src) {
3152 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3153 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
3154 if (VM_Version::supports_evex()) {
3155 tuple_type = EVEX_FVM;
3156 }
3157 emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
3158 }
3159
3160 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
3161 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3162 emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
3163 }
3164
// PUNPCKLDQ: interleave low doublewords, 66 0F 0x62.
3165 void Assembler::punpckldq(XMMRegister dst, Address src) {
3166 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3167 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
3168 if (VM_Version::supports_evex()) {
3169 tuple_type = EVEX_FV;
3170 input_size_in_bits = EVEX_32bit;
3171 }
3172 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
3173 }
3174
3175 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
3176 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3177 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
3178 }
3179
3180 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
3181 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3182 emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
4970 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
4971 VM_Version::supports_avx512dq(), vector_len, false, false);
4972 emit_int8(0x19);
4973 emit_int8((unsigned char)(0xC0 | encode));
4974 // 0x01 - extract from bits 255:128
4975 // 0x02 - extract from bits 383:256
4976 // 0x03 - extract from bits 511:384
4977 emit_int8(value & 0x3);
4978 }
4979
4980 // duplicate 4-byte integer data from src into 8 locations in dest
4981 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
4982 assert(VM_Version::supports_avx2(), "");
4983 int vector_len = AVX_256bit;
4984 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
4985 vector_len, VEX_OPCODE_0F_38, false);
4986 emit_int8(0x58);
4987 emit_int8((unsigned char)(0xC0 | encode));
4988 }
4989
4990 // duplicate 4-byte integer data from src into all lanes of dest (count depends on vector_len)
4991 void Assembler::evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
4992 assert(VM_Version::supports_evex(), "");
4993 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
4994 vector_len, VEX_OPCODE_0F_38, false);
4995 emit_int8(0x58);
4996 emit_int8((unsigned char)(0xC0 | encode));
4997 }
4998
4999 // Carry-Less Multiplication Quadword
// PCLMULQDQ: 66 0F3A 0x44; trailing imm8 mask selects which 64-bit halves multiply.
5000 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
5001 assert(VM_Version::supports_clmul(), "");
5002 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false,
5003 VEX_OPCODE_0F_3A, false, AVX_128bit, true);
5004 emit_int8(0x44);
5005 emit_int8((unsigned char)(0xC0 | encode));
5006 emit_int8((unsigned char)mask);
5007 }
5008
5009 // Carry-Less Multiplication Quadword
// Three-operand VEX form (requires both AVX and CLMUL).
5010 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
5011 assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
5012 int vector_len = AVX_128bit;
5013 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66,
5014 vector_len, VEX_OPCODE_0F_3A, true);
5015 emit_int8(0x44);
5016 emit_int8((unsigned char)(0xC0 | encode));
5017 emit_int8((unsigned char)mask);
5018 }
5589 // confine pre opcode extensions in pp bits to lower two bits
5590 // of form {66, F3, F2}
5591 byte3 |= pre;
5592 emit_int8(byte3);
5593
5594 // P2: byte 4 as zL'Lbv'aaa
5595 int byte4 = (no_mask_reg) ? 0 : 1; // kregs are implemented in the low 3 bits as aaa (hard code k1, it will be initialized for now)
5596 // EVEX.v` for extending EVEX.vvvv or VIDX
5597 byte4 |= (evex_v ? 0: EVEX_V);
5598 // third EXEC.b for broadcast actions
5599 byte4 |= (is_extended_context ? EVEX_Rb : 0);
5600 // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
5601 byte4 |= ((vector_len) & 0x3) << 5;
5602 // last is EVEX.z for zero/merge actions
5603 byte4 |= (is_merge_context ? EVEX_Z : 0);
5604 emit_int8(byte4);
5605 }
5606
// Emit a VEX or EVEX prefix for a memory-operand instruction.
// Chooses EVEX when AVX-512 is in use (UseAVX > 2) and the instruction is not
// forced to the legacy VEX path; otherwise falls back to plain VEX encoding.
5607 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre,
5608 VexOpcode opc, bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg) {
// R/B/X extension bits: reg operand >= 8, and REX-needing base/index of the address.
5609 bool vex_r = (xreg_enc >= 8);
5610 bool vex_b = adr.base_needs_rex();
5611 bool vex_x = adr.index_needs_rex();
5612 avx_vector_len = vector_len;
5613
5614 // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit
5615 if (VM_Version::supports_avx512vl() == false) {
5616 switch (vector_len) {
5617 case AVX_128bit:
5618 case AVX_256bit:
5619 legacy_mode = true;
5620 break;
5621 }
5622 }
5623
5624 if ((UseAVX > 2) && (legacy_mode == false))
5625 {
// EVEX can address registers 16-31; the high bit travels in EVEX.R'/V'.
5626 bool evex_r = (xreg_enc >= 16);
5627 bool evex_v = (nds_enc >= 16);
5628 is_evex_instruction = true;
5629 evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg);
5630 } else {
5631 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector_len);
5632 }
5633 }
5634
5635 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
5636 bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg ) {
5637 bool vex_r = (dst_enc >= 8);
5638 bool vex_b = (src_enc >= 8);
5639 bool vex_x = false;
5640 avx_vector_len = vector_len;
5641
5642 // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit
5643 if (VM_Version::supports_avx512vl() == false) {
5644 switch (vector_len) {
5645 case AVX_128bit:
5646 case AVX_256bit:
5647 legacy_mode = true;
5648 break;
5649 }
5650 }
5651
5652 if ((UseAVX > 2) && (legacy_mode == false))
5653 {
5654 bool evex_r = (dst_enc >= 16);
5655 bool evex_v = (nds_enc >= 16);
5656 // can use vex_x as bank extender on rm encoding
5657 vex_x = (src_enc >= 16);
5658 evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg);
6263
// 64-bit AND — same three forms as andl but with a REX.W prefix (prefixq variants).
6264 void Assembler::andq(Register dst, int32_t imm32) {
6265 (void) prefixq_and_encode(dst->encoding());
6266 emit_arith(0x81, 0xE0, dst, imm32);
6267 }
6268
6269 void Assembler::andq(Register dst, Address src) {
6270 InstructionMark im(this);
6271 prefixq(src, dst);
6272 emit_int8(0x23);
6273 emit_operand(dst, src);
6274 }
6275
6276 void Assembler::andq(Register dst, Register src) {
6277 (void) prefixq_and_encode(dst->encoding(), src->encoding());
6278 emit_arith(0x23, 0xC0, dst, src);
6279 }
6280
// ANDN (BMI1), 64-bit: dst = ~src1 & src2, VEX.W1 0F38 0xF2.
6281 void Assembler::andnq(Register dst, Register src1, Register src2) {
6282 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6283 int encode = vex_prefix_0F38_and_encode_q(dst, src1, src2);
6284 emit_int8((unsigned char)0xF2);
6285 emit_int8((unsigned char)(0xC0 | encode));
6286 }
6287
// Memory-source form; EVEX tuple metadata is primed for displacement compression.
6288 void Assembler::andnq(Register dst, Register src1, Address src2) {
6289 if (VM_Version::supports_evex()) {
6290 tuple_type = EVEX_T1S;
6291 input_size_in_bits = EVEX_64bit;
6292 }
6293 InstructionMark im(this);
6294 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6295 vex_prefix_0F38_q(dst, src1, src2);
6296 emit_int8((unsigned char)0xF2);
6297 emit_operand(dst, src2);
6298 }
6299
// BSF r64, r64: bit scan forward, REX.W 0F BC.
6300 void Assembler::bsfq(Register dst, Register src) {
6301 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
6302 emit_int8(0x0F);
6303 emit_int8((unsigned char)0xBC);
6304 emit_int8((unsigned char)(0xC0 | encode));
6305 }
6306
// BSR r64, r64: bit scan reverse, REX.W 0F BD.
6307 void Assembler::bsrq(Register dst, Register src) {
6308 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
6309 emit_int8(0x0F);
6310 emit_int8((unsigned char)0xBD);
6311 emit_int8((unsigned char)(0xC0 | encode));
6312 }
6313
// BSWAP r64: REX.W 0F C8+r.
6314 void Assembler::bswapq(Register reg) {
6315 int encode = prefixq_and_encode(reg->encoding());
6316 emit_int8(0x0F);
6317 emit_int8((unsigned char)(0xC8 | encode));
6318 }
6319
// BMI1 BLSx group, 64-bit (VEX.W1 0F38 0xF3). The opcode extension rides in the
// ModRM reg field via a fixed register (rbx => /3 BLSI, rdx => /2 BLSMSK, rcx => /1 BLSR).
6320 void Assembler::blsiq(Register dst, Register src) {
6321 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6322 int encode = vex_prefix_0F38_and_encode_q(rbx, dst, src);
6323 emit_int8((unsigned char)0xF3);
6324 emit_int8((unsigned char)(0xC0 | encode));
6325 }
6326
6327 void Assembler::blsiq(Register dst, Address src) {
6328 InstructionMark im(this);
6329 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6330 vex_prefix_0F38_q(rbx, dst, src);
6331 emit_int8((unsigned char)0xF3);
6332 emit_operand(rbx, src);
6333 }
6334
6335 void Assembler::blsmskq(Register dst, Register src) {
6336 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6337 int encode = vex_prefix_0F38_and_encode_q(rdx, dst, src);
6338 emit_int8((unsigned char)0xF3);
6339 emit_int8((unsigned char)(0xC0 | encode));
6340 }
6341
6342 void Assembler::blsmskq(Register dst, Address src) {
6343 InstructionMark im(this);
6344 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6345 vex_prefix_0F38_q(rdx, dst, src);
6346 emit_int8((unsigned char)0xF3);
6347 emit_operand(rdx, src);
6348 }
6349
6350 void Assembler::blsrq(Register dst, Register src) {
6351 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6352 int encode = vex_prefix_0F38_and_encode_q(rcx, dst, src);
6353 emit_int8((unsigned char)0xF3);
6354 emit_int8((unsigned char)(0xC0 | encode));
6355 }
6356
6357 void Assembler::blsrq(Register dst, Address src) {
6358 InstructionMark im(this);
6359 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6360 vex_prefix_0F38_q(rcx, dst, src);
6361 emit_int8((unsigned char)0xF3);
6362 emit_operand(rcx, src);
6363 }
6364
// CQO: sign-extend RAX into RDX:RAX (REX.W + 0x99).
6365 void Assembler::cdqq() {
6366 prefix(REX_W);
6367 emit_int8((unsigned char)0x99);
6368 }
6369
// CLFLUSH m8: flush cache line containing adr; 0F AE /7 (rdi supplies the /7 extension).
6370 void Assembler::clflush(Address adr) {
6371 prefix(adr);
6372 emit_int8(0x0F);
6373 emit_int8((unsigned char)0xAE);
6374 emit_operand(rdi, adr);
6375 }
6376
6377 void Assembler::cmovq(Condition cc, Register dst, Register src) {
6378 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
6379 emit_int8(0x0F);
6380 emit_int8(0x40 | cc);
|
1330
// 32-bit AND — three addressing forms.
// AND r32, imm32: arithmetic-group opcode 0x81, /4 extension (ModRM base 0xE0).
1331 void Assembler::andl(Register dst, int32_t imm32) {
1332 prefix(dst);
1333 emit_arith(0x81, 0xE0, dst, imm32);
1334 }
1335
// AND r32, r/m32 (memory form): opcode 0x23.
1336 void Assembler::andl(Register dst, Address src) {
1337 InstructionMark im(this);
1338 prefix(src, dst);
1339 emit_int8(0x23);
1340 emit_operand(dst, src);
1341 }
1342
// AND r32, r32: opcode 0x23 with register-direct ModRM (base 0xC0).
1343 void Assembler::andl(Register dst, Register src) {
1344 (void) prefix_and_encode(dst->encoding(), src->encoding());
1345 emit_arith(0x23, 0xC0, dst, src);
1346 }
1347
// ANDN (BMI1): dst = ~src1 & src2, 0F38 0xF2; forced through the legacy (VEX-only)
// prefix path — BMI1 instructions have no EVEX form.
1348 void Assembler::andnl(Register dst, Register src1, Register src2) {
1349 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1350 int encode = vex_prefix_0F38_and_encode_legacy(dst, src1, src2, false);
1351 emit_int8((unsigned char)0xF2);
1352 emit_int8((unsigned char)(0xC0 | encode));
1353 }
1354
// ANDN memory-source form: dst = ~src1 & [src2].
1355 void Assembler::andnl(Register dst, Register src1, Address src2) {
1356 InstructionMark im(this);
1357 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1358 vex_prefix_0F38_legacy(dst, src1, src2, false);
1359 emit_int8((unsigned char)0xF2);
1360 emit_operand(dst, src2);
1361 }
1362
// BSF r32, r32: bit scan forward (index of lowest set bit), 0F BC.
1363 void Assembler::bsfl(Register dst, Register src) {
1364 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1365 emit_int8(0x0F);
1366 emit_int8((unsigned char)0xBC);
1367 emit_int8((unsigned char)(0xC0 | encode));
1368 }
1369
// BSR r32, r32: bit scan reverse (index of highest set bit), 0F BD.
1370 void Assembler::bsrl(Register dst, Register src) {
1371 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1372 emit_int8(0x0F);
1373 emit_int8((unsigned char)0xBD);
1374 emit_int8((unsigned char)(0xC0 | encode));
1375 }
1376
// BSWAP r32: byte-swap register, 0F C8+r (register number folded into the opcode byte).
1377 void Assembler::bswapl(Register reg) { // bswap
1378 int encode = prefix_and_encode(reg->encoding());
1379 emit_int8(0x0F);
1380 emit_int8((unsigned char)(0xC8 | encode));
1381 }
1382
// BMI1 BLSx group (32-bit): legacy (VEX-only) 0F38 0xF3; opcode extension carried in
// the ModRM reg field via a fixed register (rbx => /3 BLSI, rdx => /2 BLSMSK).
1383 void Assembler::blsil(Register dst, Register src) {
1384 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1385 int encode = vex_prefix_0F38_and_encode_legacy(rbx, dst, src, false);
1386 emit_int8((unsigned char)0xF3);
1387 emit_int8((unsigned char)(0xC0 | encode));
1388 }
1389
1390 void Assembler::blsil(Register dst, Address src) {
1391 InstructionMark im(this);
1392 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1393 vex_prefix_0F38_legacy(rbx, dst, src, false);
1394 emit_int8((unsigned char)0xF3);
1395 emit_operand(rbx, src);
1396 }
1397
1398 void Assembler::blsmskl(Register dst, Register src) {
1399 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1400 int encode = vex_prefix_0F38_and_encode_legacy(rdx, dst, src, false);
1401 emit_int8((unsigned char)0xF3);
1402 emit_int8((unsigned char)(0xC0 | encode));
1403 }
1404
// BLSMSK dst, [src] (BMI1): mask up to and including lowest set bit of the memory
// operand; 0F38 0xF3 /2 (rdx carries the opcode extension in the ModRM reg field).
1405 void Assembler::blsmskl(Register dst, Address src) {
1406 InstructionMark im(this);
1407 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
// Use the legacy (VEX-only) prefix path, matching every other BMI1 encoder in this
// file (andnl, blsil, blsrl and the register form of blsmskl). The plain
// vex_prefix_0F38 path could select an EVEX prefix when UseAVX > 2, which is
// invalid for BMI1 instructions — they have no EVEX encoding.
1408 vex_prefix_0F38_legacy(rdx, dst, src, false);
1409 emit_int8((unsigned char)0xF3);
1410 emit_operand(rdx, src);
1411 }
1412
// BLSR (BMI1): reset lowest set bit; legacy VEX 0F38 0xF3 /1 (rcx carries the extension).
1413 void Assembler::blsrl(Register dst, Register src) {
1414 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1415 int encode = vex_prefix_0F38_and_encode_legacy(rcx, dst, src, false);
1416 emit_int8((unsigned char)0xF3);
1417 emit_int8((unsigned char)(0xC0 | encode));
1418 }
1419
1420 void Assembler::blsrl(Register dst, Address src) {
1421 InstructionMark im(this);
1422 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
1423 vex_prefix_0F38_legacy(rcx, dst, src, false);
1424 emit_int8((unsigned char)0xF3);
1425 emit_operand(rcx, src);
1426 }
1427
1428 void Assembler::call(Label& L, relocInfo::relocType rtype) {
1429 // suspect disp32 is always good
1430 int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
1431
1432 if (L.is_bound()) {
1433 const int long_size = 5;
1434 int offs = (int)( target(L) - pc() );
1435 assert(offs <= 0, "assembler error");
1436 InstructionMark im(this);
1437 // 1110 1000 #32-bit disp
1438 emit_int8((unsigned char)0xE8);
1439 emit_data(offs - long_size, rtype, operand);
1440 } else {
1441 InstructionMark im(this);
1442 // 1110 1000 #32-bit disp
1443 L.add_patch_at(code(), locator());
3097 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3098 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
3099 emit_int8(0x73);
3100 emit_int8((unsigned char)(0xC0 | encode));
3101 emit_int8(shift);
3102 }
3103
// PSLLDQ: byte-wise shift-left of the full 128-bit xmm value; 66 0F 73 with the
// /7 extension supplied by the fixed xmm7 operand, followed by the imm8 shift count.
3104 void Assembler::pslldq(XMMRegister dst, int shift) {
3105 // Shift left 128 bit value in xmm register by number of bytes.
3106 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3107 int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, true, VEX_OPCODE_0F, false, AVX_128bit, (VM_Version::supports_avx512bw() == false));
3108 emit_int8(0x73);
3109 emit_int8((unsigned char)(0xC0 | encode));
3110 emit_int8(shift);
3111 }
3112
// PTEST (SSE4.1): logical compare setting ZF/CF, 66 0F38 0x17. Memory form first.
3113 void Assembler::ptest(XMMRegister dst, Address src) {
3114 assert(VM_Version::supports_sse4_1(), "");
3115 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
3116 InstructionMark im(this);
3117 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, false,
3118 VEX_OPCODE_0F_38, false, AVX_128bit, true);
3119 emit_int8(0x17);
3120 emit_operand(dst, src);
3121 }
3122
// PTEST register-register form.
3123 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
3124 assert(VM_Version::supports_sse4_1(), "");
3125 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, false,
3126 VEX_OPCODE_0F_38, false, AVX_128bit, true);
3127 emit_int8(0x17);
3128 emit_int8((unsigned char)(0xC0 | encode));
3129 }
3130
// VPTEST (AVX, 256-bit): logical compare, VEX 66 0F38 0x17. Memory form first.
3131 void Assembler::vptest(XMMRegister dst, Address src) {
3132 assert(VM_Version::supports_avx(), "");
3133 InstructionMark im(this);
3134 int vector_len = AVX_256bit;
3135 assert(dst != xnoreg, "sanity");
3136 int dst_enc = dst->encoding();
3137 // swap src<->dst for encoding
3138 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len, true, false);
3139 emit_int8(0x17);
3140 emit_operand(dst, src);
3141 }
3142
// VPTEST register-register form.
3143 void Assembler::vptest(XMMRegister dst, XMMRegister src) {
3144 assert(VM_Version::supports_avx(), "");
3145 int vector_len = AVX_256bit;
3146 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
3147 vector_len, VEX_OPCODE_0F_38, true, false);
3148 emit_int8(0x17);
3149 emit_int8((unsigned char)(0xC0 | encode));
3150 }
3151
// PUNPCKLBW: interleave low bytes, 66 0F 0x60. Byte-granular op, so the legacy
// path is taken unless AVX512VL+BW are both present.
3152 void Assembler::punpcklbw(XMMRegister dst, Address src) {
3153 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3154 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
3155 if (VM_Version::supports_evex()) {
3156 tuple_type = EVEX_FVM;
3157 }
3158 emit_simd_arith(0x60, dst, src, VEX_SIMD_66, false, (VM_Version::supports_avx512vlbw() == false));
3159 }
3160
3161 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
3162 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3163 emit_simd_arith(0x60, dst, src, VEX_SIMD_66, false, (VM_Version::supports_avx512vlbw() == false));
3164 }
3165
// PUNPCKLDQ: interleave low doublewords, 66 0F 0x62.
3166 void Assembler::punpckldq(XMMRegister dst, Address src) {
3167 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3168 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
3169 if (VM_Version::supports_evex()) {
3170 tuple_type = EVEX_FV;
3171 input_size_in_bits = EVEX_32bit;
3172 }
3173 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
3174 }
3175
3176 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
3177 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3178 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
3179 }
3180
3181 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
3182 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3183 emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
4971 int encode = vex_prefix_and_encode(src_enc, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A,
4972 VM_Version::supports_avx512dq(), vector_len, false, false);
4973 emit_int8(0x19);
4974 emit_int8((unsigned char)(0xC0 | encode));
4975 // 0x01 - extract from bits 255:128
4976 // 0x02 - extract from bits 383:256
4977 // 0x03 - extract from bits 511:384
4978 emit_int8(value & 0x3);
4979 }
4980
4981 // duplicate 4-byte integer data from src into 8 locations in dest
// VPBROADCASTD (AVX2, 256-bit): 66 0F38 0x58.
4982 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
4983 assert(VM_Version::supports_avx2(), "");
4984 int vector_len = AVX_256bit;
4985 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
4986 vector_len, VEX_OPCODE_0F_38, false);
4987 emit_int8(0x58);
4988 emit_int8((unsigned char)(0xC0 | encode));
4989 }
4990
4991 // duplicate 1-byte integer data from src into 16|32|64 locations in dest : requires AVX512BW and AVX512VL
// EVEX VPBROADCASTB (xmm source): 66 0F38 0x78.
4992 void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
4993 assert(VM_Version::supports_evex(), "");
4994 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
4995 vector_len, VEX_OPCODE_0F_38, false);
4996 emit_int8(0x78);
4997 emit_int8((unsigned char)(0xC0 | encode));
4998 }
4999
// Memory-source form; T1S/8-bit tuple metadata enables EVEX disp8 compression.
5000 void Assembler::evpbroadcastb(XMMRegister dst, Address src, int vector_len) {
5001 assert(VM_Version::supports_evex(), "");
5002 tuple_type = EVEX_T1S;
5003 input_size_in_bits = EVEX_8bit;
5004 InstructionMark im(this);
5005 assert(dst != xnoreg, "sanity");
5006 int dst_enc = dst->encoding();
5007 // swap src<->dst for encoding
5008 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
5009 emit_int8(0x78);
5010 emit_operand(dst, src);
5011 }
5012
5013 // duplicate 2-byte integer data from src into 8|16|32 locations in dest : requires AVX512BW and AVX512VL
// EVEX VPBROADCASTW (xmm source): 66 0F38 0x79.
5014 void Assembler::evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
5015 assert(VM_Version::supports_evex(), "");
5016 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
5017 vector_len, VEX_OPCODE_0F_38, false);
5018 emit_int8(0x79);
5019 emit_int8((unsigned char)(0xC0 | encode));
5020 }
5021
// Memory-source form; T1S/16-bit tuple metadata for disp8 compression.
5022 void Assembler::evpbroadcastw(XMMRegister dst, Address src, int vector_len) {
5023 assert(VM_Version::supports_evex(), "");
5024 tuple_type = EVEX_T1S;
5025 input_size_in_bits = EVEX_16bit;
5026 InstructionMark im(this);
5027 assert(dst != xnoreg, "sanity");
5028 int dst_enc = dst->encoding();
5029 // swap src<->dst for encoding
5030 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
5031 emit_int8(0x79);
5032 emit_operand(dst, src);
5033 }
5034
5035 // duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL
// EVEX VPBROADCASTD (xmm source): 66 0F38 0x58.
5036 void Assembler::evpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
5037 assert(VM_Version::supports_evex(), "");
5038 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66,
5039 vector_len, VEX_OPCODE_0F_38, false);
5040 emit_int8(0x58);
5041 emit_int8((unsigned char)(0xC0 | encode));
5042 }
5043
// Memory-source form; T1S/32-bit tuple metadata for disp8 compression.
5044 void Assembler::evpbroadcastd(XMMRegister dst, Address src, int vector_len) {
5045 assert(VM_Version::supports_evex(), "");
5046 tuple_type = EVEX_T1S;
5047 input_size_in_bits = EVEX_32bit;
5048 InstructionMark im(this);
5049 assert(dst != xnoreg, "sanity");
5050 int dst_enc = dst->encoding();
5051 // swap src<->dst for encoding
5052 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
5053 emit_int8(0x58);
5054 emit_operand(dst, src);
5055 }
5056
5057 // duplicate 8-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL
// EVEX VPBROADCASTQ (xmm source): 66 0F38 0x59, W1 selects the quadword element size.
5058 void Assembler::evpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
5059 assert(VM_Version::supports_evex(), "");
5060 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
5061 VEX_OPCODE_0F_38, true, vector_len, false, false);
5062 emit_int8(0x59);
5063 emit_int8((unsigned char)(0xC0 | encode));
5064 }
5065
// Memory-source form; T1S/64-bit tuple metadata for disp8 compression.
5066 void Assembler::evpbroadcastq(XMMRegister dst, Address src, int vector_len) {
5067 assert(VM_Version::supports_evex(), "");
5068 tuple_type = EVEX_T1S;
5069 input_size_in_bits = EVEX_64bit;
5070 InstructionMark im(this);
5071 assert(dst != xnoreg, "sanity");
5072 int dst_enc = dst->encoding();
5073 // swap src<->dst for encoding
5074 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len);
5075 emit_int8(0x59);
5076 emit_operand(dst, src);
5077 }
5078
5079 // duplicate single precision fp from src into 4|8|16 locations in dest : requires AVX512VL
// EVEX VBROADCASTSS (xmm source): 66 0F38 0x18.
5080 void Assembler::evpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
5081 assert(VM_Version::supports_evex(), "");
5082 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
5083 VEX_OPCODE_0F_38, false, vector_len, false, false);
5084 emit_int8(0x18);
5085 emit_int8((unsigned char)(0xC0 | encode));
5086 }
5087
// Memory-source form; T1S/32-bit tuple metadata for disp8 compression.
5088 void Assembler::evpbroadcastss(XMMRegister dst, Address src, int vector_len) {
5089 assert(VM_Version::supports_evex(), "");
5090 tuple_type = EVEX_T1S;
5091 input_size_in_bits = EVEX_32bit;
5092 InstructionMark im(this);
5093 assert(dst != xnoreg, "sanity");
5094 int dst_enc = dst->encoding();
5095 // swap src<->dst for encoding
5096 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector_len);
5097 emit_int8(0x18);
5098 emit_operand(dst, src);
5099 }
5100
5101 // duplicate double precision fp from src into 2|4|8 locations in dest : requires AVX512VL
// EVEX VBROADCASTSD (xmm source): 66 0F38 0x19, W1 for the 64-bit element.
5102 void Assembler::evpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
5103 assert(VM_Version::supports_evex(), "");
5104 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
5105 VEX_OPCODE_0F_38, true, vector_len, false, false);
5106 emit_int8(0x19);
5107 emit_int8((unsigned char)(0xC0 | encode));
5108 }
5109
// Memory-source form; T1S/64-bit tuple metadata for disp8 compression.
5110 void Assembler::evpbroadcastsd(XMMRegister dst, Address src, int vector_len) {
5111 assert(VM_Version::supports_evex(), "");
5112 tuple_type = EVEX_T1S;
5113 input_size_in_bits = EVEX_64bit;
5114 InstructionMark im(this);
5115 assert(dst != xnoreg, "sanity");
5116 int dst_enc = dst->encoding();
5117 // swap src<->dst for encoding
5118 vex_prefix(src, 0, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, true, vector_len);
5119 emit_int8(0x19);
5120 emit_operand(dst, src);
5121 }
5122
5123 // duplicate 1-byte integer data from src into 16||32|64 locations in dest : requires AVX512BW and AVX512VL
5124 void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
5125 assert(VM_Version::supports_evex(), "");
5126 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
5127 VEX_OPCODE_0F_38, false, vector_len, false, false);
5128 emit_int8(0x7A);
5129 emit_int8((unsigned char)(0xC0 | encode));
5130 }
5131
5132 // duplicate 2-byte integer data from src into 8|16||32 locations in dest : requires AVX512BW and AVX512VL
5133 void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
5134 assert(VM_Version::supports_evex(), "");
5135 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
5136 VEX_OPCODE_0F_38, false, vector_len, false, false);
5137 emit_int8(0x7B);
5138 emit_int8((unsigned char)(0xC0 | encode));
5139 }
5140
// duplicate 4-byte integer data from src into 4|8|16 locations in dest : requires AVX512VL
// EVEX VPBROADCASTD from a GPR: EVEX.66.0F38.W0 7C /r.
void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
                                     VEX_OPCODE_0F_38, false, vector_len, false, false);
  emit_int8(0x7C);
  emit_int8((unsigned char)(0xC0 | encode));
}
5149
// duplicate 8-byte integer data from src into 2|4|8 locations in dest : requires AVX512VL
// EVEX VPBROADCASTQ from a GPR shares opcode 0x7C with VPBROADCASTD;
// the vex_w = true argument (EVEX.W1) selects the 64-bit form.
void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66,
                                     VEX_OPCODE_0F_38, true, vector_len, false, false);
  emit_int8(0x7C);
  emit_int8((unsigned char)(0xC0 | encode));
}
5158
// Carry-Less Multiplication Quadword
// PCLMULQDQ xmm1, xmm2/m128, imm8 (66 0F 3A 44 /r ib):
// mask's low bits select which 64-bit halves of the operands are multiplied.
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
  assert(VM_Version::supports_clmul(), "");
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, false,
                                      VEX_OPCODE_0F_3A, false, AVX_128bit, true);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  // trailing imm8 operand-selector byte
  emit_int8((unsigned char)mask);
}
5168
// Carry-Less Multiplication Quadword
// VPCLMULQDQ xmm1, xmm2, xmm3/m128, imm8 — three-operand VEX form,
// always encoded at 128-bit vector length.
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  int vector_len = AVX_128bit;
  int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66,
                                     vector_len, VEX_OPCODE_0F_3A, true);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  // trailing imm8 operand-selector byte
  emit_int8((unsigned char)mask);
}
5749 // confine pre opcode extensions in pp bits to lower two bits
5750 // of form {66, F3, F2}
5751 byte3 |= pre;
5752 emit_int8(byte3);
5753
5754 // P2: byte 4 as zL'Lbv'aaa
5755 int byte4 = (no_mask_reg) ? 0 : 1; // kregs are implemented in the low 3 bits as aaa (hard code k1, it will be initialized for now)
5756 // EVEX.v` for extending EVEX.vvvv or VIDX
5757 byte4 |= (evex_v ? 0: EVEX_V);
5758 // third EXEC.b for broadcast actions
5759 byte4 |= (is_extended_context ? EVEX_Rb : 0);
5760 // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
5761 byte4 |= ((vector_len) & 0x3) << 5;
5762 // last is EVEX.z for zero/merge actions
5763 byte4 |= (is_merge_context ? EVEX_Z : 0);
5764 emit_int8(byte4);
5765 }
5766
// Emit a VEX or EVEX prefix for an instruction with a memory operand.
// Chooses EVEX when AVX-512 is in use and the instruction is not forced
// into legacy (VEX) mode; otherwise falls back to the 2/3-byte VEX form.
void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre,
                           VexOpcode opc, bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg) {
  // VEX.R extends the ModRM.reg register; VEX.B/VEX.X extend the
  // memory operand's base and index registers.
  bool vex_r = ((xreg_enc & 8) == 8) ? 1 : 0;
  bool vex_b = adr.base_needs_rex();
  bool vex_x = adr.index_needs_rex();
  // remember the vector length for later emission bookkeeping
  avx_vector_len = vector_len;

  // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit
  if (VM_Version::supports_avx512vl() == false) {
    switch (vector_len) {
    case AVX_128bit:
    case AVX_256bit:
      legacy_mode = true;
      break;
    }
  }

  if ((UseAVX > 2) && (legacy_mode == false))
  {
    // EVEX.R' / EVEX.V' extend register encodings into banks 16-31.
    bool evex_r = (xreg_enc >= 16);
    bool evex_v = (nds_enc >= 16);
    is_evex_instruction = true;
    evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg);
  } else {
    vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector_len);
  }
}
5794
5795 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc,
5796 bool vex_w, int vector_len, bool legacy_mode, bool no_mask_reg ) {
5797 bool vex_r = ((dst_enc & 8) == 8) ? 1 : 0;
5798 bool vex_b = ((src_enc & 8) == 8) ? 1 : 0;
5799 bool vex_x = false;
5800 avx_vector_len = vector_len;
5801
5802 // if vector length is turned off, revert to AVX for vectors smaller than AVX_512bit
5803 if (VM_Version::supports_avx512vl() == false) {
5804 switch (vector_len) {
5805 case AVX_128bit:
5806 case AVX_256bit:
5807 legacy_mode = true;
5808 break;
5809 }
5810 }
5811
5812 if ((UseAVX > 2) && (legacy_mode == false))
5813 {
5814 bool evex_r = (dst_enc >= 16);
5815 bool evex_v = (nds_enc >= 16);
5816 // can use vex_x as bank extender on rm encoding
5817 vex_x = (src_enc >= 16);
5818 evex_prefix(vex_r, vex_b, vex_x, vex_w, evex_r, evex_v, nds_enc, pre, opc, false, false, vector_len, no_mask_reg);
6423
// AND r/m64, imm32 (REX.W 81 /4 id); emit_arith narrows to the sign-extended
// 8-bit immediate form (83 /4 ib) when imm32 fits in a byte.
void Assembler::andq(Register dst, int32_t imm32) {
  // encoding byte is produced by emit_arith; only the REX prefix is needed here
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}
6428
// AND r64, r/m64 (REX.W 23 /r) — 64-bit and of a memory operand into dst.
void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}
6435
// AND r64, r64 (REX.W 23 /r) — register-register form.
void Assembler::andq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}
6440
6441 void Assembler::andnq(Register dst, Register src1, Register src2) {
6442 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6443 int encode = vex_prefix_0F38_and_encode_q_legacy(dst, src1, src2);
6444 emit_int8((unsigned char)0xF2);
6445 emit_int8((unsigned char)(0xC0 | encode));
6446 }
6447
// ANDN r64, r64, m64 (VEX.NDS.LZ.0F38.W1 F2 /r): dst = ~src1 & [src2].
void Assembler::andnq(Register dst, Register src1, Address src2) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q_legacy(dst, src1, src2);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}
6455
6456 void Assembler::bsfq(Register dst, Register src) {
6457 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
6458 emit_int8(0x0F);
6459 emit_int8((unsigned char)0xBC);
6460 emit_int8((unsigned char)(0xC0 | encode));
6461 }
6462
6463 void Assembler::bsrq(Register dst, Register src) {
6464 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
6465 emit_int8(0x0F);
6466 emit_int8((unsigned char)0xBD);
6467 emit_int8((unsigned char)(0xC0 | encode));
6468 }
6469
6470 void Assembler::bswapq(Register reg) {
6471 int encode = prefixq_and_encode(reg->encoding());
6472 emit_int8(0x0F);
6473 emit_int8((unsigned char)(0xC8 | encode));
6474 }
6475
6476 void Assembler::blsiq(Register dst, Register src) {
6477 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6478 int encode = vex_prefix_0F38_and_encode_q_legacy(rbx, dst, src);
6479 emit_int8((unsigned char)0xF3);
6480 emit_int8((unsigned char)(0xC0 | encode));
6481 }
6482
// BLSI r64, m64 (VEX.NDD.LZ.0F38.W1 F3 /3): dst = [src] & -[src].
// rbx (encoding 3) stands in for the /3 opcode extension.
void Assembler::blsiq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q_legacy(rbx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}
6490
6491 void Assembler::blsmskq(Register dst, Register src) {
6492 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6493 int encode = vex_prefix_0F38_and_encode_q_legacy(rdx, dst, src);
6494 emit_int8((unsigned char)0xF3);
6495 emit_int8((unsigned char)(0xC0 | encode));
6496 }
6497
// BLSMSK r64, m64 (VEX.NDD.LZ.0F38.W1 F3 /2).
// rdx (encoding 2) stands in for the /2 opcode extension.
void Assembler::blsmskq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q_legacy(rdx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}
6505
6506 void Assembler::blsrq(Register dst, Register src) {
6507 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
6508 int encode = vex_prefix_0F38_and_encode_q_legacy(rcx, dst, src);
6509 emit_int8((unsigned char)0xF3);
6510 emit_int8((unsigned char)(0xC0 | encode));
6511 }
6512
// BLSR r64, m64 (VEX.NDD.LZ.0F38.W1 F3 /1).
// rcx (encoding 1) stands in for the /1 opcode extension.
void Assembler::blsrq(Register dst, Address src) {
  InstructionMark im(this);
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  vex_prefix_0F38_q_legacy(rcx, dst, src);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}
6520
// CQO (REX.W 99): sign-extend RAX into RDX:RAX.
void Assembler::cdqq() {
  prefix(REX_W);
  emit_int8((unsigned char)0x99);
}
6525
// CLFLUSH m8 (0F AE /7): flush the cache line containing adr.
// rdi (encoding 7) stands in for the /7 opcode extension.
void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_operand(rdi, adr);
}
6532
6533 void Assembler::cmovq(Condition cc, Register dst, Register src) {
6534 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
6535 emit_int8(0x0F);
6536 emit_int8(0x40 | cc);
|